diff --git a/.gitignore b/.gitignore
index fd693adc3be7b0be708a1a38deb6123383ec399d..d5c7f763cf41939b0e577fc0ce72a2d8bf2436b6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -46,6 +46,7 @@ psim/
 pysim/
 *.out
 *DS_Store
+tests/script/api/batchprepare

 # Doxygen Generated files
 html/
@@ -108,4 +109,5 @@ TAGS
 contrib/*
 !contrib/CMakeLists.txt
 !contrib/test
-sql
\ No newline at end of file
+sql
+debug*/
diff --git a/Jenkinsfile2 b/Jenkinsfile2
index 14c03068d7a32745bb269d07d7903da12253694b..a2b55e3acca0c141a2d550ccabb5bb129adb3d7e 100644
--- a/Jenkinsfile2
+++ b/Jenkinsfile2
@@ -269,7 +269,7 @@ pipeline {
            }
        }
        stage('linux test') {
-           agent{label " slave3_0 || slave15 || slave16 || slave17 "}
+           agent{label " worker03 || slave215 || slave217 || slave219 "}
            options { skipDefaultCheckout() }
            when {
                changeRequest()
@@ -289,7 +289,7 @@ pipeline {
                        cd ${WKC}/tests/parallel_test
                        export DEFAULT_RETRY_TIME=2
                        date
-                       timeout 2100 time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME} -l ${WKDIR}/log -o 480
+                       timeout 2100 time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 480
                    '''
                }
            }
diff --git a/cmake/cmake.define b/cmake/cmake.define
index 1d34896f9aa1636f493f1d844f2e4d34daff8077..a8bab17aba8a412099b34c6d82c9787468bc89e8 100644
--- a/cmake/cmake.define
+++ b/cmake/cmake.define
@@ -46,7 +46,7 @@ ENDIF ()

 IF (TD_WINDOWS)
     MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
-    SET(COMMON_FLAGS "/w /D_WIN32")
+    SET(COMMON_FLAGS "/w /D_WIN32 /Zi")
     SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
     # IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
     #     SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
@@ -71,8 +71,8 @@ ELSE ()
 ENDIF ()

 IF (${SANITIZER} MATCHES "true")
-    SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -static-libasan -g3")
-    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -static-libasan -g3")
+    SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3")
+    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3")
     MESSAGE(STATUS "Will compile with Address Sanitizer!")
 ELSE ()
     SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3")
diff --git a/cmake/cmake.options b/cmake/cmake.options
index c77b580c17e6d7c7f32ffa24fddd01ecb0f1e394..ab3c5ac1ad08b98ee2dbe09692584be63e477d71 100644
--- a/cmake/cmake.options
+++ b/cmake/cmake.options
@@ -49,7 +49,7 @@ IF(${TD_WINDOWS})
     option(
         BUILD_TEST
         "If build unit tests using googletest"
-        OFF
+        ON
     )

 ELSE ()
@@ -146,5 +146,6 @@ option(
 option(
     BUILD_WITH_INVERTEDINDEX
     "If use invertedIndex"
-    ON
+    OFF
 )
+
diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt
index aba955ff3ba68fe5bef617b295330417509b9c9f..31b9936f3e19fa6c3b943f92b9b0664ac7f3897b 100644
--- a/contrib/CMakeLists.txt
+++ b/contrib/CMakeLists.txt
@@ -100,8 +100,10 @@ endif(${BUILD_WITH_NURAFT})

 # addr2line
 if(${BUILD_ADDR2LINE})
-  cat("${TD_SUPPORT_DIR}/libdwarf_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
-  cat("${TD_SUPPORT_DIR}/addr2line_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
+  if(NOT ${TD_WINDOWS})
+    cat("${TD_SUPPORT_DIR}/libdwarf_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
+    cat("${TD_SUPPORT_DIR}/addr2line_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
+  endif(NOT ${TD_WINDOWS})
 endif(${BUILD_ADDR2LINE})

 # download dependencies
@@ -335,45 +337,47 @@ endif(${BUILD_WITH_SQLITE})

 # addr2line
 if(${BUILD_ADDR2LINE})
-  check_include_file( "sys/types.h"     HAVE_SYS_TYPES_H)
-  check_include_file( "sys/stat.h"      HAVE_SYS_STAT_H )
-  check_include_file( "inttypes.h"      HAVE_INTTYPES_H )
-  check_include_file( "stddef.h"        HAVE_STDDEF_H   )
-  check_include_file( "stdlib.h"        HAVE_STDLIB_H   )
-  check_include_file( "string.h"        HAVE_STRING_H   )
-  check_include_file( "memory.h"        HAVE_MEMORY_H   )
-  check_include_file( "strings.h"       HAVE_STRINGS_H  )
-  check_include_file( "stdint.h"        HAVE_STDINT_H   )
-  check_include_file( "unistd.h"        HAVE_UNISTD_H   )
-  check_include_file( "sgidefs.h"       HAVE_SGIDEFS_H  )
-  check_include_file( "stdafx.h"        HAVE_STDAFX_H   )
-  check_include_file( "elf.h"           HAVE_ELF_H      )
-  check_include_file( "libelf.h"        HAVE_LIBELF_H   )
-  check_include_file( "libelf/libelf.h" HAVE_LIBELF_LIBELF_H)
-  check_include_file( "alloca.h"        HAVE_ALLOCA_H   )
-  check_include_file( "elfaccess.h"     HAVE_ELFACCESS_H)
-  check_include_file( "sys/elf_386.h"   HAVE_SYS_ELF_386_H )
-  check_include_file( "sys/elf_amd64.h" HAVE_SYS_ELF_AMD64_H)
-  check_include_file( "sys/elf_sparc.h" HAVE_SYS_ELF_SPARC_H)
-  check_include_file( "sys/ia64/elf.h"  HAVE_SYS_IA64_ELF_H )
-  set(VERSION 0.3.1)
-  set(PACKAGE_VERSION "\"${VERSION}\"")
-  configure_file(libdwarf/cmake/config.h.cmake config.h)
-  file(GLOB_RECURSE LIBDWARF_SOURCES "libdwarf/src/lib/libdwarf/*.c")
-  add_library(libdwarf STATIC ${LIBDWARF_SOURCES})
-  set_target_properties(libdwarf PROPERTIES OUTPUT_NAME "libdwarf")
-  if(HAVE_LIBELF_H OR HAVE_LIBELF_LIBELF_H)
-    target_link_libraries(libdwarf PUBLIC libelf)
-  endif()
-  target_include_directories(libdwarf SYSTEM PUBLIC "libdwarf/src/lib/libdwarf" ${CMAKE_CURRENT_BINARY_DIR})
-  file(READ "addr2line/addr2line.c" ADDR2LINE_CONTENT)
-  string(REPLACE "static int" "int" ADDR2LINE_CONTENT "${ADDR2LINE_CONTENT}")
-  string(REPLACE "static void" "void" ADDR2LINE_CONTENT "${ADDR2LINE_CONTENT}")
-  string(REPLACE "main(" "main_addr2line(" ADDR2LINE_CONTENT "${ADDR2LINE_CONTENT}")
-  file(WRITE "addr2line/addr2line.c" "${ADDR2LINE_CONTENT}")
-  add_library(addr2line STATIC "addr2line/addr2line.c")
-  target_link_libraries(addr2line PUBLIC libdwarf dl z)
-  target_include_directories(addr2line PUBLIC "libdwarf/src/lib/libdwarf" )
+  if(NOT ${TD_WINDOWS})
+    check_include_file( "sys/types.h"     HAVE_SYS_TYPES_H)
+    check_include_file( "sys/stat.h"      HAVE_SYS_STAT_H )
+    check_include_file( "inttypes.h"      HAVE_INTTYPES_H )
+    check_include_file( "stddef.h"        HAVE_STDDEF_H   )
+    check_include_file( "stdlib.h"        HAVE_STDLIB_H   )
+    check_include_file( "string.h"        HAVE_STRING_H   )
+    check_include_file( "memory.h"        HAVE_MEMORY_H   )
+    check_include_file( "strings.h"       HAVE_STRINGS_H  )
+    check_include_file( "stdint.h"        HAVE_STDINT_H   )
+    check_include_file( "unistd.h"        HAVE_UNISTD_H   )
+    check_include_file( "sgidefs.h"       HAVE_SGIDEFS_H  )
+    check_include_file( "stdafx.h"        HAVE_STDAFX_H   )
+    check_include_file( "elf.h"           HAVE_ELF_H      )
+    check_include_file( "libelf.h"        HAVE_LIBELF_H   )
+    check_include_file( "libelf/libelf.h" HAVE_LIBELF_LIBELF_H)
+    check_include_file( "alloca.h"        HAVE_ALLOCA_H   )
+    check_include_file( "elfaccess.h"     HAVE_ELFACCESS_H)
+    check_include_file( "sys/elf_386.h"   HAVE_SYS_ELF_386_H )
+    check_include_file( "sys/elf_amd64.h" HAVE_SYS_ELF_AMD64_H)
+    check_include_file( "sys/elf_sparc.h" HAVE_SYS_ELF_SPARC_H)
+    check_include_file( "sys/ia64/elf.h"  HAVE_SYS_IA64_ELF_H )
+    set(VERSION 0.3.1)
+    set(PACKAGE_VERSION "\"${VERSION}\"")
+    configure_file(libdwarf/cmake/config.h.cmake config.h)
+    file(GLOB_RECURSE LIBDWARF_SOURCES "libdwarf/src/lib/libdwarf/*.c")
+    add_library(libdwarf STATIC ${LIBDWARF_SOURCES})
+    set_target_properties(libdwarf PROPERTIES OUTPUT_NAME "libdwarf")
+    if(HAVE_LIBELF_H OR HAVE_LIBELF_LIBELF_H)
+      target_link_libraries(libdwarf PUBLIC libelf)
+    endif()
+    target_include_directories(libdwarf SYSTEM PUBLIC "libdwarf/src/lib/libdwarf" ${CMAKE_CURRENT_BINARY_DIR})
+    file(READ "addr2line/addr2line.c" ADDR2LINE_CONTENT)
+    string(REPLACE "static int" "int" ADDR2LINE_CONTENT "${ADDR2LINE_CONTENT}")
+    string(REPLACE "static void" "void" ADDR2LINE_CONTENT "${ADDR2LINE_CONTENT}")
+    string(REPLACE "main(" "main_addr2line(" ADDR2LINE_CONTENT "${ADDR2LINE_CONTENT}")
+    file(WRITE "addr2line/addr2line.c" "${ADDR2LINE_CONTENT}")
+    add_library(addr2line STATIC "addr2line/addr2line.c")
+    target_link_libraries(addr2line PUBLIC libdwarf dl z)
+    target_include_directories(addr2line PUBLIC "libdwarf/src/lib/libdwarf" )
+  endif(NOT ${TD_WINDOWS})
 endif(${BUILD_ADDR2LINE})
diff --git a/contrib/test/craft/raftMain.c b/contrib/test/craft/raftMain.c
index 12be3deb2e33aba9be9b45acd1595a749ab1b2c5..e1c66422b3b90b23ff8c6f01cf07aa8adace5983 100644
--- a/contrib/test/craft/raftMain.c
+++ b/contrib/test/craft/raftMain.c
@@ -243,7 +243,7 @@ void console(SRaftServer *pRaftServer) {


     } else if (strcmp(cmd, "dropnode") == 0) {
-        char host[HOST_LEN];
+        char host[HOST_LEN] = {0};
         uint32_t port;
         parseAddr(param1, host, HOST_LEN, &port);
         uint64_t rid = raftId(host, port);
@@ -258,7 +258,7 @@ void console(SRaftServer *pRaftServer) {


     } else if (strcmp(cmd, "put") == 0) {
-        char buf[256];
+        char buf[256] = {0};
         snprintf(buf, sizeof(buf), "%s--%s", param1, param2);
         putValue(&pRaftServer->raft, buf);

diff --git a/docs-cn/02-intro.md b/docs-cn/02-intro.md
index 2a56c5e9e667b511003b1ee08801ddcb54ff2ec4..673c2e96b65814fc1cd572d54f948793ed6fa521 100644
--- a/docs-cn/02-intro.md
+++ b/docs-cn/02-intro.md
@@ -62,7 +62,7 @@ TDengine的主要功能如下:

-![TDengine技术生态图](eco_system.png)
+![TDengine Database 技术生态图](eco_system.webp)
图 1. TDengine技术生态图
@@ -119,7 +119,6 @@ TDengine的主要功能如下: - [用 InfluxDB 开源的性能测试工具对比 InfluxDB 和 TDengine](https://www.taosdata.com/blog/2020/01/13/1105.html) - [TDengine 与 OpenTSDB 对比测试](https://www.taosdata.com/blog/2019/08/21/621.html) - [TDengine 与 Cassandra 对比测试](https://www.taosdata.com/blog/2019/08/14/573.html) -- [TDengine 与 InfluxDB 对比测试](https://www.taosdata.com/blog/2019/07/19/419.html) - [TDengine VS InfluxDB ,写入性能大 PK !](https://www.taosdata.com/2021/11/05/3248.html) - [TDengine 和 InfluxDB 查询性能对比测试报告](https://www.taosdata.com/2022/02/22/5969.html) - [TDengine 与 InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse 等数据库的对比测试报告](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf) diff --git a/docs-cn/04-concept/index.md b/docs-cn/04-concept/index.md index ca25595260953f8d941ccaf367bdc45a8325488f..8e97d4a2f43537c1229c8e8ea092ddfc1257dde7 100644 --- a/docs-cn/04-concept/index.md +++ b/docs-cn/04-concept/index.md @@ -29,7 +29,7 @@ title: 数据模型和基本概念 10.3 219 0.31 -Beijing.Chaoyang +California.SanFrancisco 2 @@ -38,7 +38,7 @@ title: 数据模型和基本概念 10.2 220 0.23 -Beijing.Chaoyang +California.SanFrancisco 3 @@ -47,7 +47,7 @@ title: 数据模型和基本概念 11.5 221 0.35 -Beijing.Haidian +California.LosAngeles 3 @@ -56,7 +56,7 @@ title: 数据模型和基本概念 13.4 223 0.29 -Beijing.Haidian +California.LosAngeles 2 @@ -65,7 +65,7 @@ title: 数据模型和基本概念 12.6 218 0.33 -Beijing.Chaoyang +California.SanFrancisco 2 @@ -74,7 +74,7 @@ title: 数据模型和基本概念 11.8 221 0.28 -Beijing.Haidian +California.LosAngeles 2 @@ -83,7 +83,7 @@ title: 数据模型和基本概念 10.3 218 0.25 -Beijing.Chaoyang +California.SanFrancisco 3 @@ -92,7 +92,7 @@ title: 数据模型和基本概念 12.3 221 0.31 -Beijing.Chaoyang +California.SanFrancisco 2 diff --git a/docs-cn/05-get-started/index.md b/docs-cn/05-get-started/index.md index 458df909166b9769af2052ba654699e869d2081c..878d7f020245fbff383308c281fbc3fa28ba5f6c 100644 --- a/docs-cn/05-get-started/index.md +++ b/docs-cn/05-get-started/index.md @@ -132,7 +132,7 @@ Query OK, 2 row(s) in set (0.003128s) taosBenchmark ``` -该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。 +该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "California.SanFrancisco" 或者 "California.LosAngeles"。 这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器往往也仅需十几秒。 @@ -154,10 +154,10 @@ taos> select count(*) from test.meters; taos> select avg(current), max(voltage), min(phase) from test.meters; ``` -查询 location="beijing" 的记录总条数: +查询 location="California.SanFrancisco" 的记录总条数: ```sql -taos> select count(*) from test.meters where location="beijing"; +taos> select count(*) from test.meters where location="California.SanFrancisco"; ``` 查询 groupId=10 的所有记录的平均值、最大值、最小值等: diff --git a/docs-cn/07-develop/01-connect/index.md b/docs-cn/07-develop/01-connect/index.md index ebdefc77b9cc23712626f7543e0e5cc29db3e080..3a15d03f93cee7dd064f29b4911019cae3632b9a 100644 --- a/docs-cn/07-develop/01-connect/index.md +++ b/docs-cn/07-develop/01-connect/index.md @@ -33,7 +33,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速 关键不同点在于: 1. 使用 REST 连接,用户无需安装客户端驱动程序 taosc,具有跨平台易用的优势,但性能要下降 30%左右。 -2. 使用原生连接可以体验 TDengine 的全部功能,如[参数绑定接口](/reference/connector/cpp#参数绑定-api)、[订阅](reference/connector/cpp#数据订阅接口)等等。 +2. 
使用原生连接可以体验 TDengine 的全部功能,如[参数绑定接口](/reference/connector/cpp#参数绑定-api)、[订阅](/reference/connector/cpp#订阅和消费-api)等等。 ## 安装客户端驱动 taosc diff --git a/docs-cn/07-develop/02-model/index.mdx b/docs-cn/07-develop/02-model/index.mdx index a060e3c84b8c5b8e25714ce15fb2bc7afc7d49d2..7e2762b6e78393493c2c5b61959e9a6ff57a7b13 100644 --- a/docs-cn/07-develop/02-model/index.mdx +++ b/docs-cn/07-develop/02-model/index.mdx @@ -55,10 +55,10 @@ CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAG TDengine 对每个数据采集点需要独立建表。与标准的关系型数据库一样,一张表有表名,Schema,但除此之外,还可以带有一到多个标签。创建时,需要使用超级表做模板,同时指定标签的具体值。以[表 1](/tdinternal/arch#model_table1)中的智能电表为例,可以使用如下的 SQL 命令建表: ```sql -CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2); +CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2); ``` -其中 d1001 是表名,meters 是超级表的表名,后面紧跟标签 Location 的具体标签值 ”Beijing.Chaoyang",标签 groupId 的具体标签值 2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 [TAOS SQL 的表管理](/taos-sql/table) 章节。 +其中 d1001 是表名,meters 是超级表的表名,后面紧跟标签 Location 的具体标签值 "California.SanFrancisco",标签 groupId 的具体标签值 2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 [TAOS SQL 的表管理](/taos-sql/table) 章节。 :::warning 目前 TDengine 没有从技术层面限制使用一个 database (db1) 的超级表作为模板建立另一个 database (db2) 的子表,后续会禁止这种用法,不建议使用这种方法建表。 @@ -72,10 +72,10 @@ TDengine 建议将数据采集点的全局唯一 ID 作为表名(比如设备序 在某些特殊场景中,用户在写数据时并不确定某个数据采集点的表是否存在,此时可在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表且后面的 USING 语句被忽略。比如: ```sql -INSERT INTO d1001 USING meters TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32); +INSERT INTO d1001 USING meters TAGS ("California.SanFrancisco", 2) VALUES (now, 10.2, 219, 0.32); ``` -上述 SQL 语句将记录`(now, 10.2, 219, 0.32)`插入表 d1001。如果表 d1001 还未创建,则使用超级表 meters 做模板自动创建,同时打上标签值 `"Beijing.Chaoyang", 2`。 +上述 SQL 语句将记录`(now, 10.2, 219, 0.32)`插入表 d1001。如果表 d1001 还未创建,则使用超级表 meters 做模板自动创建,同时打上标签值 `"California.SanFrancisco", 2`。 关于自动建表的详细语法请参见 [插入记录时自动建表](/taos-sql/insert#插入记录时自动建表) 章节。 diff --git a/docs-cn/07-develop/03-insert-data/01-sql-writing.mdx b/docs-cn/07-develop/03-insert-data/01-sql-writing.mdx index e63ffce6dd07366da99fe1f41d0a2a8d7a623f31..99a92573c87d0f90f699a8d1352619f4df4aef39 100644 --- a/docs-cn/07-develop/03-insert-data/01-sql-writing.mdx +++ b/docs-cn/07-develop/03-insert-data/01-sql-writing.mdx @@ -52,7 +52,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, :::info -- 要提高写入效率,需要批量写入。一批写入的记录条数越多,插入效率就越高。但一条记录不能超过 16K,一条 SQL 语句总长度不能超过 1M 。 +- 要提高写入效率,需要批量写入。一批写入的记录条数越多,插入效率就越高。但一条记录不能超过 48K,一条 SQL 语句总长度不能超过 1M 。 - TDengine 支持多线程同时写入,要进一步提高写入速度,一个客户端需要打开 20 个以上的线程同时写。但线程数达到一定数量后,无法再提高,甚至还会下降,因为线程频繁切换,带来额外开销。 ::: diff --git a/docs-cn/07-develop/03-insert-data/02-influxdb-line.mdx b/docs-cn/07-develop/03-insert-data/02-influxdb-line.mdx index dedd7f0e70834e21257bda78dd184f5ddc520160..54f02c91475bb5524e259a0aa890363603a86fba 100644 --- a/docs-cn/07-develop/03-insert-data/02-influxdb-line.mdx +++ b/docs-cn/07-develop/03-insert-data/02-influxdb-line.mdx @@ -29,7 +29,7 @@ measurement,tag_set field_set timestamp 例如: ``` -meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500 +meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500 ``` :::note @@ -42,7 +42,6 @@ meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 16 要了解更多可参考:[InfluxDB Line 协议官方文档](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) 和 [TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议) - ## 示例代码 diff --git a/docs-cn/07-develop/03-insert-data/03-opentsdb-telnet.mdx 
b/docs-cn/07-develop/03-insert-data/03-opentsdb-telnet.mdx index dfbe6efda67b6928999287900637e0a251b86562..2b397e1bdc7a4c76686cd4b6d457a25dbcc2c950 100644 --- a/docs-cn/07-develop/03-insert-data/03-opentsdb-telnet.mdx +++ b/docs-cn/07-develop/03-insert-data/03-opentsdb-telnet.mdx @@ -29,10 +29,10 @@ OpenTSDB 行协议同样采用一行字符串来表示一行数据。OpenTSDB 例如: ```txt -meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3 +meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3 ``` -参考[OpenTSDB Telnet API文档](http://opentsdb.net/docs/build/html/api_telnet/put.html)。 +参考[OpenTSDB Telnet API 文档](http://opentsdb.net/docs/build/html/api_telnet/put.html)。 ## 示例代码 @@ -76,9 +76,9 @@ Query OK, 2 row(s) in set (0.002544s) taos> select tbname, * from `meters.current`; tbname | ts | value | groupid | location | ================================================================================================================================== - t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | Beijing.Haidian | - t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | Beijing.Haidian | - t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.249 | 10.300000000 | 2 | Beijing.Chaoyang | - t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | Beijing.Chaoyang | + t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | California.LosAngeles | + t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | California.LosAngeles | + t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.249 | 10.300000000 | 2 | California.SanFrancisco | + t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | California.SanFrancisco | Query OK, 4 row(s) in set (0.005399s) ``` diff --git a/docs-cn/07-develop/03-insert-data/04-opentsdb-json.mdx b/docs-cn/07-develop/03-insert-data/04-opentsdb-json.mdx index 5d445997d061ca052e4f3673b8e881ea4acf0ade..a15f80a5851ad29605e871f16aed60b68109038a 100644 --- a/docs-cn/07-develop/03-insert-data/04-opentsdb-json.mdx +++ b/docs-cn/07-develop/03-insert-data/04-opentsdb-json.mdx @@ -19,33 +19,33 @@ OpenTSDB JSON 格式协议采用一个 JSON 字符串表示一行或多行数据 ```json [ - { - "metric": "sys.cpu.nice", - "timestamp": 1346846400, - "value": 18, - "tags": { - "host": "web01", - "dc": "lga" - } - }, - { - "metric": "sys.cpu.nice", - "timestamp": 1346846400, - "value": 9, - "tags": { - "host": "web02", - "dc": "lga" - } + { + "metric": "sys.cpu.nice", + "timestamp": 1346846400, + "value": 18, + "tags": { + "host": "web01", + "dc": "lga" } + }, + { + "metric": "sys.cpu.nice", + "timestamp": 1346846400, + "value": 9, + "tags": { + "host": "web02", + "dc": "lga" + } + } ] ``` 与 OpenTSDB 行协议类似, metric 将作为超级表名, timestamp 表示时间戳,value 表示度量值, tags 表示标签集。 - -参考[OpenTSDB HTTP API文档](http://opentsdb.net/docs/build/html/api_http/put.html)。 +参考[OpenTSDB HTTP API 文档](http://opentsdb.net/docs/build/html/api_http/put.html)。 :::note + - 对于 JSON 格式协议,TDengine 并不会自动把所有标签转成 nchar 类型, 字符串将将转为 nchar 类型, 数值将同样转换为 double 类型。 - TDengine 只接收 JSON **数组格式**的字符串,即使一行数据也需要转换成数组形式。 @@ -93,7 +93,7 @@ Query OK, 2 row(s) in set (0.001954s) taos> select * from `meters.current`; ts | value | groupid | location | =================================================================================================================== - 2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | Beijing.Chaoyang | - 2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | Beijing.Chaoyang | + 2022-03-28 09:56:51.249 | 
10.300000000 | 2.000000000 | California.SanFrancisco | + 2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco | Query OK, 2 row(s) in set (0.004076s) ``` diff --git a/docs-cn/07-develop/04-query-data/index.mdx b/docs-cn/07-develop/04-query-data/index.mdx index b0a6bad3eaad174a97d8dce4e1ba0125cbf5dc03..824f36ef2f98aac227bdcaf2016d7be0a2e59328 100644 --- a/docs-cn/07-develop/04-query-data/index.mdx +++ b/docs-cn/07-develop/04-query-data/index.mdx @@ -50,14 +50,14 @@ Query OK, 2 row(s) in set (0.001100s) ### 示例一 -在 TAOS Shell,查找北京所有智能电表采集的电压平均值,并按照 location 分组。 +在 TAOS Shell,查找加利福尼亚州所有智能电表采集的电压平均值,并按照 location 分组。 ``` taos> SELECT AVG(voltage) FROM meters GROUP BY location; avg(voltage) | location | ============================================================= - 222.000000000 | Beijing.Haidian | - 219.200000000 | Beijing.Chaoyang | + 222.000000000 | California.LosAngeles | + 219.200000000 | California.SanFrancisco | Query OK, 2 row(s) in set (0.002136s) ``` @@ -88,10 +88,10 @@ taos> SELECT sum(current) FROM d1001 INTERVAL(10s); Query OK, 2 row(s) in set (0.000883s) ``` -降采样操作也适用于超级表,比如:将北京所有智能电表采集的电流值每秒钟求和 +降采样操作也适用于超级表,比如:将加利福尼亚州所有智能电表采集的电流值每秒钟求和 ``` -taos> SELECT SUM(current) FROM meters where location like "Beijing%" INTERVAL(1s); +taos> SELECT SUM(current) FROM meters where location like "California%" INTERVAL(1s); ts | sum(current) | ====================================================== 2018-10-03 14:38:04.000 | 10.199999809 | diff --git a/docs-cn/07-develop/05-continuous-query.mdx b/docs-cn/07-develop/05-continuous-query.mdx index 2fd1b3cc755188f513fe511541a84efa3558d3ea..b2223d15e33114d263b9833df51e4201bc01c772 100644 --- a/docs-cn/07-develop/05-continuous-query.mdx +++ b/docs-cn/07-develop/05-continuous-query.mdx @@ -34,8 +34,8 @@ SLIDING: 连续查询的时间窗口向前滑动的时间间隔 ```sql create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int); -create table D1001 using meters tags ("Beijing.Chaoyang", 2); -create table D1002 using meters tags ("Beijing.Haidian", 2); +create table D1001 using meters tags ("California.SanFrancisco", 2); +create table D1002 using meters tags ("California.LosAngeles", 2); ... 
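-- Editor's illustrative sketch, not part of the original commit: with the
-- super table and subtables above in place, a continuous query over this
-- schema could be defined as below; the target table name avg_vol is a
-- hypothetical example, and the INTERVAL/SLIDING values are arbitrary.
-- create table avg_vol as
--     select avg(voltage) from meters interval(1m) sliding(30s);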
``` diff --git a/docs-cn/07-develop/06-subscribe.mdx b/docs-cn/07-develop/06-subscribe.mdx index d471c114e827d7c4b40195c2c1b3c8f6a9d26ed4..0f531e07c9dce7dbb03bacebf8e5cbefae82671f 100644 --- a/docs-cn/07-develop/06-subscribe.mdx +++ b/docs-cn/07-develop/06-subscribe.mdx @@ -145,7 +145,7 @@ void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { taos_unsubscribe(tsub, keep); ``` -其第二个参数,用于决定是否在客户端保留订阅的进度信息。如果这个参数是**false**(**0**),那无论下次调用 `taos_subscribe` 时的 `restart` 参数是什么,订阅都只能重新开始。另外,进度信息的保存位置是 _{DataDir}/subscribe/_ 这个目录下,每个订阅有一个与其 `topic` 同名的文件,删掉某个文件,同样会导致下次创建其对应的订阅时只能重新开始。 +其第二个参数,用于决定是否在客户端保留订阅的进度信息。如果这个参数是**false**(**0**),那无论下次调用 `taos_subscribe` 时的 `restart` 参数是什么,订阅都只能重新开始。另外,进度信息的保存位置是 _{DataDir}/subscribe/_ 这个目录下(注:`taos.cfg` 配置文件中 `DataDir` 参数值默认为 **/var/lib/taos/**,但是 Windows 服务器上本身不存在该目录,所以需要在 Windows 的配置文件中修改 `DataDir` 参数值为相应的已存在目录"),每个订阅有一个与其 `topic` 同名的文件,删掉某个文件,同样会导致下次创建其对应的订阅时只能重新开始。 代码介绍完毕,我们来看一下实际的运行效果。假设: @@ -184,8 +184,8 @@ taos> use power; # create super table "meters" taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int); # create tabes using the schema defined by super table "meters" -taos> create table d1001 using meters tags ("Beijing.Chaoyang", 2); -taos> create table d1002 using meters tags ("Beijing.Haidian", 2); +taos> create table d1001 using meters tags ("California.SanFrancisco", 2); +taos> create table d1002 using meters tags ("California.LosAngeles", 2); # insert some rows taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1); taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1); @@ -193,27 +193,28 @@ taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08 taos> select * from meters where current > 10; ts | current | voltage | phase | location | groupid | =========================================================================================================== - 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | Beijing.Haidian | 2 | - 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | Beijing.Haidian | 2 | - 2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | Beijing.Chaoyang | 2 | - 2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | Beijing.Chaoyang | 2 | - 2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | Beijing.Chaoyang | 2 | + 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | California.LosAngeles | 2 | + 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | California.LosAngeles | 2 | + 2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | California.SanFrancisco | 2 | + 2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | California.SanFrancisco | 2 | + 2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | California.SanFrancisco | 2 | Query OK, 5 row(s) in set (0.004896s) ``` + ### 示例代码 - + - + {/* */} - + {/* @@ -222,20 +223,20 @@ Query OK, 5 row(s) in set (0.004896s) */} - - + + ### 运行示例程序 - + 示例程序会先消费符合查询条件的所有历史数据: ```bash -ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2 -ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: Beijing.Chaoyang groupid : 2 -ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2 -ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2 -ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 
2 +ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2 +ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: California.SanFrancisco groupid : 2 +ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2 +ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2 +ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2 ``` 接着,使用 TDengine CLI 向表中新增一条数据: @@ -249,5 +250,5 @@ taos> insert into d1001 values(now, 12.4, 220, 1); 因为这条数据的电流大于 10A,示例程序会将其消费: ``` -ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid: 2 +ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: California.SanFrancisco groupid: 2 ``` diff --git a/docs-cn/07-develop/07-cache.md b/docs-cn/07-develop/07-cache.md index fd31335310d62d792e5173e38a9aa778ee6c6c60..cc59c0353c0d12fb7a8f0f20254087d741361031 100644 --- a/docs-cn/07-develop/07-cache.md +++ b/docs-cn/07-develop/07-cache.md @@ -1,6 +1,6 @@ --- sidebar_label: 缓存 -title: 缓存 +title: 缓存 description: "提供写驱动的缓存管理机制,将每个表最近写入的一条记录持续保存在缓存中,可以提供高性能的最近状态查询。" --- @@ -15,7 +15,7 @@ TDengine 将内存池按块划分进行管理,数据在内存块里是以行 你可以通过函数 last_row() 快速获取一张表或一张超级表的最后一条记录,这样很便于在大屏显示各设备的实时状态或采集值。例如: ```sql -select last_row(voltage) from meters where location='Beijing.Chaoyang'; +select last_row(voltage) from meters where location='California.SanFrancisco'; ``` -该 SQL 语句将获取所有位于北京朝阳区的电表最后记录的电压值。 +该 SQL 语句将获取所有位于加利福尼亚州旧金山市的电表最后记录的电压值。 diff --git a/docs-cn/10-cluster/01-deploy.md b/docs-cn/10-cluster/01-deploy.md index cee140c0ec13bc9c8052a599a2147acc1aa15a8d..b44d2942f2e4672ef6060aa9d084db1d3342e1c8 100644 --- a/docs-cn/10-cluster/01-deploy.md +++ b/docs-cn/10-cluster/01-deploy.md @@ -22,7 +22,7 @@ title: 集群部署 ### 第二步 -建议关闭所有物理节点的防火墙,至少保证端口:6030 - 6042 的 TCP 和 UDP 端口都是开放的。强烈建议先关闭防火墙,集群搭建完毕之后,再来配置端口; +确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。 ### 第三步 diff --git a/docs-cn/12-taos-sql/02-database.md b/docs-cn/12-taos-sql/02-database.md index 1454d1d34415ea5af9b019ae6eea36923e6d05be..566fec324148fede8d897869656b83e657569f59 100644 --- a/docs-cn/12-taos-sql/02-database.md +++ b/docs-cn/12-taos-sql/02-database.md @@ -20,21 +20,21 @@ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1]; 3. 数据库名最大长度为 33; 4. 一条 SQL 语句的最大长度为 65480 个字符; 5. 
创建数据库时可用的参数有: - - cache: [Description](/reference/config/#cache) - - blocks: [Description](/reference/config/#blocks) - - days: [Description](/reference/config/#days) - - keep: [Description](/reference/config/#keep) - - minRows: [Description](/reference/config/#minrows) - - maxRows: [Description](/reference/config/#maxrows) - - wal: [Description](/reference/config/#wallevel) - - fsync: [Description](/reference/config/#fsync) - - update: [Description](/reference/config/#update) - - cacheLast: [Description](/reference/config/#cachelast) - - replica: [Description](/reference/config/#replica) - - quorum: [Description](/reference/config/#quorum) - - maxVgroupsPerDb: [Description](/reference/config/#maxvgroupsperdb) - - comp: [Description](/reference/config/#comp) - - precision: [Description](/reference/config/#precision) + - cache: [详细说明](/reference/config/#cache) + - blocks: [详细说明](/reference/config/#blocks) + - days: [详细说明](/reference/config/#days) + - keep: [详细说明](/reference/config/#keep) + - minRows: [详细说明](/reference/config/#minrows) + - maxRows: [详细说明](/reference/config/#maxrows) + - wal: [详细说明](/reference/config/#wallevel) + - fsync: [详细说明](/reference/config/#fsync) + - update: [详细说明](/reference/config/#update) + - cacheLast: [详细说明](/reference/config/#cachelast) + - replica: [详细说明](/reference/config/#replica) + - quorum: [详细说明](/reference/config/#quorum) + - maxVgroupsPerDb: [详细说明](/reference/config/#maxvgroupsperdb) + - comp: [详细说明](/reference/config/#comp) + - precision: [详细说明](/reference/config/#precision) 6. 请注意上面列出的所有参数都可以配置在配置文件 `taosd.cfg` 中作为创建数据库时使用的默认配置, `create database` 的参数中明确指定的会覆盖配置文件中的设置。 ::: diff --git a/docs-cn/12-taos-sql/03-table.md b/docs-cn/12-taos-sql/03-table.md index 675c157b3def0d670f771f55b767f3ca4f2a28af..d7235f312933ec46ed427d5da7e2c5a229fa2926 100644 --- a/docs-cn/12-taos-sql/03-table.md +++ b/docs-cn/12-taos-sql/03-table.md @@ -12,7 +12,7 @@ CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_nam 1. 表的第一个字段必须是 TIMESTAMP,并且系统自动将其设为主键; 2. 表名最大长度为 192; -3. 表的每行长度不能超过 16k 个字符;(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置) +3. 表的每行长度不能超过 48KB;(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置) 4. 子表名只能由字母、数字和下划线组成,且不能以数字开头,不区分大小写 5. 使用数据类型 binary 或 nchar,需指定其最长的字节数,如 binary(20),表示 20 字节; 6. 
为了兼容支持更多形式的表名,TDengine 引入新的转义符 "\`",可以让表名与关键词不冲突,同时不受限于上述表名称合法性约束检查。但是同样具有长度限制要求。使用转义字符以后,不再对转义字符中的内容进行大小写统一。 diff --git a/docs-cn/12-taos-sql/04-stable.md b/docs-cn/12-taos-sql/04-stable.md index a3c227317c85917b64b2477994d335710610ec70..3901427736e80bc8dd0dd87b454947af6e586561 100644 --- a/docs-cn/12-taos-sql/04-stable.md +++ b/docs-cn/12-taos-sql/04-stable.md @@ -86,7 +86,7 @@ ALTER STABLE stb_name MODIFY COLUMN field_name data_type(length); ALTER STABLE stb_name ADD TAG new_tag_name tag_type; ``` -为 STable 增加一个新的标签,并指定新标签的类型。标签总数不能超过 128 个,总长度不超过 16k 个字符。 +为 STable 增加一个新的标签,并指定新标签的类型。标签总数不能超过 128 个,总长度不超过 16KB 。 ### 删除标签 diff --git a/docs-cn/12-taos-sql/05-insert.md b/docs-cn/12-taos-sql/05-insert.md index e542e442b78c9033ae37196f4913a7c67fb19d8b..04118303f3f6517d65d8ecbbe9fdeb774a3177b7 100644 --- a/docs-cn/12-taos-sql/05-insert.md +++ b/docs-cn/12-taos-sql/05-insert.md @@ -67,7 +67,7 @@ INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07- 如果用户在写数据时并不确定某个表是否存在,此时可以在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 TAGS 取值。例如: ``` -INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32); +INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32); ``` 也可以在自动建表时,只是指定部分 TAGS 列的取值,未被指定的 TAGS 列将置为 NULL。例如: @@ -79,7 +79,7 @@ INSERT INTO d21001 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:33. 自动建表语法也支持在一条语句中向多个表插入记录。例如: ``` -INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33) +INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33) d21002 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:34.255', 10.15, 217, 0.33) d21003 USING meters (groupId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31); ``` @@ -108,13 +108,13 @@ INSERT INTO d1001 FILE '/tmp/csvfile.csv'; 从 2.1.5.0 版本开始,支持在插入来自 CSV 文件的数据时,以超级表为模板来自动创建不存在的数据表。例如: ``` -INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile.csv'; +INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile.csv'; ``` 也可以在一条语句中向多个表以自动建表的方式插入记录。例如: ``` -INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile_21001.csv' +INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv' d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv'; ``` @@ -137,7 +137,7 @@ Query OK, 1 row(s) in set (0.001029s) taos> SHOW TABLES; Query OK, 0 row(s) in set (0.000946s) -taos> INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a'); +taos> INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('a'); DB error: invalid SQL: 'a' (invalid timestamp) (0.039494s) diff --git a/docs-cn/12-taos-sql/06-select.md b/docs-cn/12-taos-sql/06-select.md index 3a860119cfe664f9ac3b0ebd046b5f4f0a612118..92abc4344b7562842fae71a84fe0cb9a168596ed 100644 --- a/docs-cn/12-taos-sql/06-select.md +++ b/docs-cn/12-taos-sql/06-select.md @@ -40,15 +40,15 @@ Query OK, 3 row(s) in set (0.001165s) taos> SELECT * FROM meters; ts | current | voltage | phase | location | groupid | ===================================================================================================================================== - 2018-10-03 14:38:05.500 | 
11.80000 | 221 | 0.28000 | Beijing.Haidian | 2 | - 2018-10-03 14:38:16.600 | 13.40000 | 223 | 0.29000 | Beijing.Haidian | 2 | - 2018-10-03 14:38:05.000 | 10.80000 | 223 | 0.29000 | Beijing.Haidian | 3 | - 2018-10-03 14:38:06.500 | 11.50000 | 221 | 0.35000 | Beijing.Haidian | 3 | - 2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 | Beijing.Chaoyang | 3 | - 2018-10-03 14:38:16.650 | 10.30000 | 218 | 0.25000 | Beijing.Chaoyang | 3 | - 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | Beijing.Chaoyang | 2 | - 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | Beijing.Chaoyang | 2 | - 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | Beijing.Chaoyang | 2 | + 2018-10-03 14:38:05.500 | 11.80000 | 221 | 0.28000 | California.LosAngeles | 2 | + 2018-10-03 14:38:16.600 | 13.40000 | 223 | 0.29000 | California.LosAngeles | 2 | + 2018-10-03 14:38:05.000 | 10.80000 | 223 | 0.29000 | California.LosAngeles | 3 | + 2018-10-03 14:38:06.500 | 11.50000 | 221 | 0.35000 | California.LosAngeles | 3 | + 2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 | California.SanFrancisco | 3 | + 2018-10-03 14:38:16.650 | 10.30000 | 218 | 0.25000 | California.SanFrancisco | 3 | + 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | California.SanFrancisco | 2 | + 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | California.SanFrancisco | 2 | + 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | California.SanFrancisco | 2 | Query OK, 9 row(s) in set (0.002022s) ``` @@ -104,8 +104,8 @@ Query OK, 1 row(s) in set (0.000849s) taos> SELECT location, groupid, current FROM d1001 LIMIT 2; location | groupid | current | ====================================================================== - Beijing.Chaoyang | 2 | 10.30000 | - Beijing.Chaoyang | 2 | 12.60000 | + California.SanFrancisco | 2 | 10.30000 | + California.SanFrancisco | 2 | 12.60000 | Query OK, 2 row(s) in set (0.003112s) ``` @@ -284,10 +284,10 @@ SELECT COUNT(TBNAME) FROM meters; taos> SELECT TBNAME, location FROM meters; tbname | location | ================================================================== - d1004 | Beijing.Haidian | - d1003 | Beijing.Haidian | - d1002 | Beijing.Chaoyang | - d1001 | Beijing.Chaoyang | + d1004 | California.LosAngeles | + d1003 | California.LosAngeles | + d1002 | California.SanFrancisco | + d1001 | California.SanFrancisco | Query OK, 4 row(s) in set (0.000881s) taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2; @@ -327,15 +327,15 @@ Query OK, 1 row(s) in set (0.001091s) - <\> 算子也可以写为 != ,请注意,这个算子不能用于数据表第一列的 timestamp 字段。 - like 算子使用通配符字符串进行匹配检查。 - - 在通配符字符串中:'%'(百分号)匹配 0 到任意个字符;'\_'(下划线)匹配单个任意 ASCII 字符。 - - 如果希望匹配字符串中原本就带有的 \_(下划线)字符,那么可以在通配符字符串中写作 `\_`,也即加一个反斜线来进行转义。(从 2.2.0.0 版本开始支持) - - 通配符字符串最长不能超过 20 字节。(从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。) + - 在通配符字符串中:'%'(百分号)匹配 0 到任意个字符;'\_'(下划线)匹配单个任意 ASCII 字符。 + - 如果希望匹配字符串中原本就带有的 \_(下划线)字符,那么可以在通配符字符串中写作 `\_`,也即加一个反斜线来进行转义。(从 2.2.0.0 版本开始支持) + - 通配符字符串最长不能超过 20 字节。(从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。) - 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。 - - 从 2.3.0.0 版本开始,已支持完整的同一列和/或不同列间的 AND/OR 运算。 + - 从 2.3.0.0 版本开始,已支持完整的同一列和/或不同列间的 AND/OR 运算。 - 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。 - - 从 2.3.0.0 版本开始,允许使用多个时间过滤条件,但首列时间戳的过滤运算结果只能包含一个区间。 + - 从 2.3.0.0 版本开始,允许使用多个时间过滤条件,但首列时间戳的过滤运算结果只能包含一个区间。 - 从 
2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。 -- 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。 +- 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('California.SanFrancisco', 'California.SanDieo')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。 - 从 2.3.0.0 版本开始,条件过滤开始支持正则表达式,关键字 match/nmatch,不区分大小写。 ## 正则表达式过滤 @@ -380,7 +380,7 @@ WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0; :::note -JOIN语句存在如下限制要求: +JOIN 语句存在如下限制要求: - 参与一条语句中 JOIN 操作的表/超级表最多可以有 10 个。 - 在包含 JOIN 操作的查询语句中不支持 FILL。 @@ -409,13 +409,13 @@ SELECT ... FROM (SELECT ... FROM ...) ...; - 在内层和外层查询中,都支持普通的表间/超级表间 JOIN。内层查询的计算结果也可以再参与数据子表的 JOIN 操作。 - 目前内层查询、外层查询均不支持 UNION 操作。 - 内层查询支持的功能特性与非嵌套的查询语句能力是一致的。 - - 内层查询的 ORDER BY 子句一般没有意义,建议避免这样的写法以免无谓的资源消耗。 + - 内层查询的 ORDER BY 子句一般没有意义,建议避免这样的写法以免无谓的资源消耗。 - 与非嵌套的查询语句相比,外层查询所能支持的功能特性存在如下限制: - - 计算函数部分: - - 如果内层查询的结果数据未提供时间戳,那么计算过程依赖时间戳的函数在外层会无法正常工作。例如:TOP, BOTTOM, FIRST, LAST, DIFF。 - - 计算过程需要两遍扫描的函数,在外层查询中无法正常工作。例如:此类函数包括:STDDEV, PERCENTILE。 - - 外层查询中不支持 IN 算子,但在内层中可以使用。 - - 外层查询不支持 GROUP BY。 + - 计算函数部分: + - 如果内层查询的结果数据未提供时间戳,那么计算过程依赖时间戳的函数在外层会无法正常工作。例如:TOP, BOTTOM, FIRST, LAST, DIFF。 + - 计算过程需要两遍扫描的函数,在外层查询中无法正常工作。例如:此类函数包括:STDDEV, PERCENTILE。 + - 外层查询中不支持 IN 算子,但在内层中可以使用。 + - 外层查询不支持 GROUP BY。 ::: diff --git a/docs-cn/12-taos-sql/07-function.md b/docs-cn/12-taos-sql/07-function.md index f6e564419ddaa18931b0f0e0e4e7b5b3219a92f6..68dfd579badc1814852fc7a529831667d4383278 100644 --- a/docs-cn/12-taos-sql/07-function.md +++ b/docs-cn/12-taos-sql/07-function.md @@ -698,7 +698,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause]; ``` -**功能说明**:返回跳过最后 offset_value 个,然后取连续 k 个记录,不忽略 NULL 值。offset_val 可以不输入。此时返回最后的 k 个记录。当有 offset_val 输入的情况下,该函数功能等效于 `order by ts desc LIMIT k OFFSET offset_val`。 +**功能说明**:返回跳过最后 offset_val 个,然后取连续 k 个记录,不忽略 NULL 值。offset_val 可以不输入。此时返回最后的 k 个记录。当有 offset_val 输入的情况下,该函数功能等效于 `order by ts desc LIMIT k OFFSET offset_val`。 **参数范围**:k: [1,100] offset_val: [0,100]。 @@ -1766,6 +1766,8 @@ SELECT TIMEDIFF(ts_val1 | datetime_string1 | ts_col1, ts_val2 | datetime_string2 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天)。 - 如果时间单位 time_unit 未指定, 返回的时间差值精度与当前 DATABASE 设置的时间精度一致。 +**支持的版本**:2.6.0.0 及以后的版本。 + **示例**: ```sql diff --git a/docs-cn/12-taos-sql/08-interval.md b/docs-cn/12-taos-sql/08-interval.md index d62e11b0dbd0ba49ceedb3807e05361f060969b3..b0619ea5ce3759e9bca1234b76e2a16176511547 100644 --- a/docs-cn/12-taos-sql/08-interval.md +++ b/docs-cn/12-taos-sql/08-interval.md @@ -11,7 +11,7 @@ TDengine 支持按时间段窗口切分方式进行聚合结果查询,比如 INTERVAL 子句用于产生相等时间周期的窗口,SLIDING 用以指定窗口向前滑动的时间。每次执行的查询是一个时间窗口,时间窗口随着时间流动向前滑动。在定义连续查询的时候需要指定时间窗口(time window )大小和每次前向增量时间(forward sliding times)。如图,[t0s, t0e] ,[t1s , t1e], [t2s, t2e] 是分别是执行三次连续查询的时间窗口范围,窗口的前向滑动的时间范围 sliding time 标识 。查询过滤、聚合等操作按照每个时间窗口为独立的单位执行。当 SLIDING 与 INTERVAL 相等的时候,滑动窗口即为翻转窗口。 -![时间窗口示意图](/img/sql/timewindow-1.png) +![TDengine Database 时间窗口示意图](./timewindow-1.webp) INTERVAL 和 SLIDING 子句需要配合聚合和选择函数来使用。以下 SQL 语句非法: @@ -33,7 +33,7 @@ _ 从 2.1.5.0 版本开始,INTERVAL 语句允许的最短时间间隔调整为 使用整数(布尔值)或字符串来标识产生记录时候设备的状态量。产生的记录如果具有相同的状态量数值则归属于同一个状态窗口,数值改变后该窗口关闭。如下图所示,根据状态量确定的状态窗口分别是[2019-04-28 14:22:07,2019-04-28 
14:22:10]和[2019-04-28 14:22:11,2019-04-28 14:22:12]两个。(状态窗口暂不支持对超级表使用) -![时间窗口示意图](/img/sql/timewindow-3.png) +![TDengine Database 时间窗口示意图](./timewindow-3.webp) 使用 STATE_WINDOW 来确定状态窗口划分的列。例如: @@ -45,7 +45,7 @@ SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status); 会话窗口根据记录的时间戳主键的值来确定是否属于同一个会话。如下图所示,如果设置时间戳的连续的间隔小于等于 12 秒,则以下 6 条记录构成 2 个会话窗口,分别是:[2019-04-28 14:22:10,2019-04-28 14:22:30]和[2019-04-28 14:23:10,2019-04-28 14:23:30]。因为 2019-04-28 14:22:30 与 2019-04-28 14:23:10 之间的时间间隔是 40 秒,超过了连续时间间隔(12 秒)。 -![时间窗口示意图](/img/sql/timewindow-2.png) +![TDengine Database 时间窗口示意图](./timewindow-2.webp) 在 tol_value 时间间隔范围内的结果都认为归属于同一个窗口,如果连续的两条记录的时间超过 tol_val,则自动开启下一个窗口。(会话窗口暂不支持对超级表使用) diff --git a/docs-cn/12-taos-sql/09-limit.md b/docs-cn/12-taos-sql/09-limit.md index 3c86a3862174377e6a00d046fb69627c773fe76e..7673e24a83cc1ba5335b11f29803cf9f3eae26e5 100644 --- a/docs-cn/12-taos-sql/09-limit.md +++ b/docs-cn/12-taos-sql/09-limit.md @@ -7,9 +7,9 @@ title: 边界限制 - 数据库名最大长度为 32。 - 表名最大长度为 192,不包括数据库名前缀和分隔符 -- 每行数据最大长度 16k 个字符, 从 2.1.7.0 版本开始,每行数据最大长度 48k 个字符(注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。 +- 每行数据最大长度 48KB (注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。 - 列名最大长度为 64,最多允许 4096 列,最少需要 2 列,第一列必须是时间戳。注:从 2.1.7.0 版本(不含)以前最多允许 4096 列 -- 标签名最大长度为 64,最多允许 128 个,至少要有 1 个标签,一个表中标签值的总长度不超过 16k 个字符。 +- 标签名最大长度为 64,最多允许 128 个,至少要有 1 个标签,一个表中标签值的总长度不超过 16KB 。 - SQL 语句最大长度 1048576 个字符,也可通过客户端配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576。 - SELECT 语句的查询结果,最多允许返回 4096 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。注: 2.1.7.0 版本(不含)之前为最多允许 1024 列 - 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制。 diff --git a/docs-cn/12-taos-sql/12-keywords/index.md b/docs-cn/12-taos-sql/12-keywords/index.md index 608d4e080967cfd97072706cf0963ae669960be6..a64a7161d03f28c131a07e5b1077d7b956e44007 100644 --- a/docs-cn/12-taos-sql/12-keywords/index.md +++ b/docs-cn/12-taos-sql/12-keywords/index.md @@ -23,17 +23,17 @@ title: TDengine 参数限制与保留关键字 去掉了 `` ‘“`\ `` (单双引号、撇号、反斜杠、空格) - 数据库名:不能包含“.”以及特殊字符,不能超过 32 个字符 -- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过 192 个字符,每行数据最大长度 16k 个字符 -- 表的列名:不能包含特殊字符,不能超过 64 个字符 +- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过 192 个字节 ,每行数据最大长度 48KB +- 表的列名:不能包含特殊字符,不能超过 64 个字节 - 数据库名、表名、列名,都不能以数字开头,合法的可用字符集是“英文字符、数字和下划线” - 表的列数:不能超过 1024 列,最少需要 2 列,第一列必须是时间戳(从 2.1.7.0 版本开始,改为最多支持 4096 列) -- 记录的最大长度:包括时间戳 8 byte,不能超过 16KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个 byte 的存储位置) -- 单条 SQL 语句默认最大字符串长度:1048576 byte,但可通过系统配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576 byte +- 记录的最大长度:包括时间戳 8 字节,不能超过 48KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个 字节 的存储位置) +- 单条 SQL 语句默认最大字符串长度:1048576 字节,但可通过系统配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576 字节 - 数据库副本数:不能超过 3 -- 用户名:不能超过 23 个 byte -- 用户密码:不能超过 15 个 byte +- 用户名:不能超过 23 个 字节 +- 用户密码:不能超过 15 个 字节 - 标签(Tags)数量:不能超过 128 个,可以 0 个 -- 标签的总长度:不能超过 16K byte +- 标签的总长度:不能超过 16KB - 记录条数:仅受存储空间限制 - 表的个数:仅受节点个数限制 - 库的个数:仅受节点个数限制 diff --git a/docs-cn/12-taos-sql/index.md b/docs-cn/12-taos-sql/index.md index 269bc1d2b5ddfa25c42652d8f639bfe2fb1d42e5..cb01b3a918778abc6c7891c1ff185f1db32d3d36 100644 --- a/docs-cn/12-taos-sql/index.md +++ b/docs-cn/12-taos-sql/index.md @@ -7,8 +7,6 @@ description: "TAOS SQL 支持的语法规则、主要查询功能、支持的 SQ TAOS SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TAOS SQL 为了便于用户快速上手,在一定程度上提供与标准 SQL 类似的风格和模式。严格意义上,TAOS SQL 并不是也不试图提供标准的 SQL 语法。此外,由于 TDengine 针对的时序性结构化数据不提供删除功能,因此在 TAO SQL 中不提供数据删除的相关功能。 -TAOS SQL 不支持关键字的缩写,例如 DESCRIBE 不能缩写为 DESC。 - 本章节 SQL 语法遵循如下约定: - <\> 里的内容是用户需要输入的,但不要输入 <\> 本身 @@ -37,4 +35,4 @@ import DocCardList from '@theme/DocCardList'; import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; -``` 
\ No newline at end of file +``` diff --git a/docs-cn/12-taos-sql/timewindow-1.webp b/docs-cn/12-taos-sql/timewindow-1.webp new file mode 100644 index 0000000000000000000000000000000000000000..82747558e96df752a0010d85be79a4af07e4a1df Binary files /dev/null and b/docs-cn/12-taos-sql/timewindow-1.webp differ diff --git a/docs-cn/12-taos-sql/timewindow-2.webp b/docs-cn/12-taos-sql/timewindow-2.webp new file mode 100644 index 0000000000000000000000000000000000000000..8f1314ae34f7f5c5cca1d3cb80455f555fad38c3 Binary files /dev/null and b/docs-cn/12-taos-sql/timewindow-2.webp differ diff --git a/docs-cn/12-taos-sql/timewindow-3.webp b/docs-cn/12-taos-sql/timewindow-3.webp new file mode 100644 index 0000000000000000000000000000000000000000..5bd16e68e7fd5da6805551e9765975277cd5d4d9 Binary files /dev/null and b/docs-cn/12-taos-sql/timewindow-3.webp differ diff --git a/docs-cn/13-operation/11-optimize.md b/docs-cn/13-operation/11-optimize.md deleted file mode 100644 index 1ca9e8c44492a5882613a0b55d959d7abca8b5f6..0000000000000000000000000000000000000000 --- a/docs-cn/13-operation/11-optimize.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: 性能优化 ---- - -因数据行 [update](/train-faq/faq/#update)、表删除、数据过期等原因,TDengine 的磁盘存储文件有可能出现数据碎片,影响查询操作的性能表现。从 2.1.3.0 版本开始,新增 SQL 指令 COMPACT 来启动碎片重整过程: - -```sql -COMPACT VNODES IN (vg_id1, vg_id2, ...) -``` - -COMPACT 命令对指定的一个或多个 VGroup 启动碎片重整,系统会通过任务队列尽快安排重整操作的具体执行。COMPACT 指令所需的 VGroup id,可以通过 `SHOW VGROUPS;` 指令的输出结果获取;而且在 `SHOW VGROUPS;` 中会有一个 compacting 列,值为 2 时表示对应的 VGroup 处于排队等待进行重整的状态,值为 1 时表示正在进行碎片重整,为 0 时则表示并没有处于重整状态(未要求进行重整或已经完成重整)。 - -需要注意的是,碎片重整操作会大幅消耗磁盘 I/O。因此在重整进行期间,有可能会影响节点的写入和查询性能,甚至在极端情况下导致短时间的阻写。 - -## 存储参数优化 - -不同应用场景的数据往往具有不同的数据特征,比如保留天数、副本数、采集频次、记录大小、采集点的数量、压缩等都可完全不同。为获得在存储上的最高效率,TDengine 提供如下存储相关的系统配置参数(既可以作为 create database 指令的参数,也可以写在 taos.cfg 配置文件中用来设定创建新数据库时所采用的默认值): - -| # | 配置参数名称 | 单位 | 含义 | **取值范围** | **缺省值** | -| --- | ------------ | ---- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ---------- | -| 1 | days | 天 | 一个数据文件存储数据的时间跨度 | 1-3650 | 10 | -| 2 | keep | 天 | (可通过 alter database 修改)数据库中数据保留的天数。 | 1-36500 | 3650 | -| 3 | cache | MB | 内存块的大小 | 1-128 | 16 | -| 4 | blocks | | (可通过 alter database 修改)每个 VNODE(TSDB)中有多少个 cache 大小的内存块。因此一个 VNODE 使用的内存大小粗略为(cache \* blocks)。 | 3-10000 | 6 | -| 5 | quorum | | (可通过 alter database 修改)多副本环境下指令执行的确认数要求 | 1-2 | 1 | -| 6 | minRows | | 文件块中记录的最小条数 | 10-1000 | 100 | -| 7 | maxRows | | 文件块中记录的最大条数 | 200-10000 | 4096 | -| 8 | comp | | (可通过 alter database 修改)文件压缩标志位 | 0:关闭,1:一阶段压缩,2:两阶段压缩 | 2 | -| 9 | walLevel | | (作为 database 的参数时名为 wal;在 taos.cfg 中作为参数时需要写作 walLevel)WAL 级别 | 1:写 WAL,但不执行 fsync;2:写 WAL, 而且执行 fsync | 1 | -| 10 | fsync | 毫秒 | 当 wal 设置为 2 时,执行 fsync 的周期。设置为 0,表示每次写入,立即执行 fsync。 | | 3000 | -| 11 | replica | | (可通过 alter database 修改)副本个数 | 1-3 | 1 | -| 12 | precision | | 时间戳精度标识(2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。)(从 2.1.5.0 版本开始,新增对纳秒时间精度的支持) | ms 表示毫秒,us 表示微秒,ns 表示纳秒 | ms | -| 13 | update | | 是否允许数据更新(从 2.1.7.0 版本开始此参数支持 0 ~ 2 的取值范围,在此之前取值只能是 [0, 1];而 2.0.8.0 之前的版本在 SQL 指令中不支持此参数。) | 0:不允许;1:允许更新整行;2:允许部分列更新。 | 0 | -| 14 | cacheLast | | (可通过 alter database 修改)是否在内存中缓存子表的最近数据(从 2.1.2.0 版本开始此参数支持 0 ~ 3 的取值范围,在此之前取值只能是 [0, 1];而 2.0.11.0 之前的版本在 SQL 指令中不支持此参数。)(2.1.2.0 
版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。) | 0:关闭;1:缓存子表最近一行数据;2:缓存子表每一列的最近的非 NULL 值;3:同时打开缓存最近行和列功能 | 0 | - -对于一个应用场景,可能有多种数据特征的数据并存,最佳的设计是将具有相同数据特征的表放在一个库里,这样一个应用有多个库,而每个库可以配置不同的存储参数,从而保证系统有最优的性能。TDengine 允许应用在创建库时指定上述存储参数,如果指定,该参数就将覆盖对应的系统配置参数。举例,有下述 SQL: - -```sql - CREATE DATABASE demo DAYS 10 CACHE 32 BLOCKS 8 REPLICA 3 UPDATE 1; -``` - -该 SQL 创建了一个库 demo, 每个数据文件存储 10 天数据,内存块为 32 兆字节,每个 VNODE 占用 8 个内存块,副本数为 3,允许更新,而其他参数与系统配置完全一致。 - -一个数据库创建成功后,仅部分参数可以修改并实时生效,其余参数不能修改: - -| **参数名** | **能否修改** | **范围** | **修改语法示例** | -| ----------- | ------------ | ---------------------------------------------------------- | -------------------------------------- | -| name | | | | -| create time | | | | -| ntables | | | | -| vgroups | | | | -| replica | **YES** | 在线 dnode 数目为:
1:1-1;
2:1-2;
\>=3:1-3 | ALTER DATABASE REPLICA _n_ | -| quorum | **YES** | 1-2 | ALTER DATABASE QUORUM _n_ | -| days | | | | -| keep | **YES** | days-365000 | ALTER DATABASE KEEP _n_ | -| cache | | | | -| blocks | **YES** | 3-1000 | ALTER DATABASE BLOCKS _n_ | -| minrows | | | | -| maxrows | | | | -| wal | | | | -| fsync | | | | -| comp | **YES** | 0-2 | ALTER DATABASE COMP _n_ | -| precision | | | | -| status | | | | -| update | | | | -| cachelast | **YES** | 0 \| 1 \| 2 \| 3 | ALTER DATABASE CACHELAST _n_ | - -**说明:**在 2.1.3.0 版本之前,通过 ALTER DATABASE 语句修改这些参数后,需要重启服务器才能生效。 - -TDengine 集群中加入一个新的 dnode 时,涉及集群相关的一些参数必须与已有集群的配置相同,否则不能成功加入到集群中。会进行校验的参数如下: - -- numOfMnodes:系统中管理节点个数。默认值:3。(2.0 版本从 2.0.20.11 开始、2.1 及以上版本从 2.1.6.0 开始,numOfMnodes 默认值改为 1。) -- mnodeEqualVnodeNum: 一个 mnode 等同于 vnode 消耗的个数。默认值:4。 -- offlineThreshold: dnode 离线阈值,超过该时间将导致该 dnode 从集群中删除。单位为秒,默认值:86400\*10(即 10 天)。 -- statusInterval: dnode 向 mnode 报告状态时长。单位为秒,默认值:1。 -- maxTablesPerVnode: 每个 vnode 中能够创建的最大表个数。默认值:1000000。 -- maxVgroupsPerDb: 每个数据库中能够使用的最大 vgroup 个数。 -- arbitrator: 系统中裁决器的 endpoint,缺省为空。 -- timezone、locale、charset 的配置见客户端配置。(2.0.20.0 及以上的版本里,集群中加入新节点已不要求 locale 和 charset 参数取值一致) -- balance:是否启用负载均衡。0:否,1:是。默认值:1。 -- flowctrl:是否启用非阻塞流控。0:否,1:是。默认值:1。 -- slaveQuery:是否启用 slave vnode 参与查询。0:否,1:是。默认值:1。 -- adjustMaster:是否启用 vnode master 负载均衡。0:否,1:是。默认值:1。 - -为方便调试,可通过 SQL 语句临时调整每个 dnode 的日志配置,系统重启后会失效: - -```sql -ALTER DNODE -``` - -- dnode_id: 可以通过 SQL 语句"SHOW DNODES"命令获取 -- config: 要调整的日志参数,在如下列表中取值 - > resetlog 截断旧日志文件,创建一个新日志文件 - > debugFlag < 131 | 135 | 143 > 设置 debugFlag 为 131、135 或者 143 - -例如: - -``` -alter dnode 1 debugFlag 135; -``` diff --git a/docs-cn/14-reference/03-connector/03-connector.mdx b/docs-cn/14-reference/03-connector/03-connector.mdx index c0e714f148a7821e070be38a5484484fdd747e9a..7a4a85276ef4bb4ab829250fcf67076962dbb871 100644 --- a/docs-cn/14-reference/03-connector/03-connector.mdx +++ b/docs-cn/14-reference/03-connector/03-connector.mdx @@ -4,7 +4,7 @@ title: 连接器 TDengine 提供了丰富的应用程序开发接口,为了便于用户快速开发自己的应用,TDengine 支持了多种编程语言的连接器,其中官方连接器包括支持 C/C++、Java、Python、Go、Node.js、C# 和 Rust 的连接器。这些连接器支持使用原生接口(taosc)和 REST 接口(部分语言暂不支持)连接 TDengine 集群。社区开发者也贡献了多个非官方连接器,例如 ADO.NET 连接器、Lua 连接器和 PHP 连接器。 -![image-connector](/img/connector.png) +![TDengine Database connector architecture](./connector.webp) ## 支持的平台 diff --git a/docs-cn/14-reference/03-connector/connector.webp b/docs-cn/14-reference/03-connector/connector.webp new file mode 100644 index 0000000000000000000000000000000000000000..040cf5c26c726b345b2e0e5363dd3c677bec61be Binary files /dev/null and b/docs-cn/14-reference/03-connector/connector.webp differ diff --git a/docs-cn/14-reference/03-connector/csharp.mdx b/docs-cn/14-reference/03-connector/csharp.mdx index c2fbb3b67f640ae01766b417e4247d52da4fd334..1e23df9286bf0cb3bf1db95e334301c04d01ad04 100644 --- a/docs-cn/14-reference/03-connector/csharp.mdx +++ b/docs-cn/14-reference/03-connector/csharp.mdx @@ -18,7 +18,7 @@ import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx" `TDengine.Connector` 是 TDengine 提供的 C# 语言连接器。C# 开发人员可以通过它开发存取 TDengine 集群数据的 C# 应用软件。 -`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、订阅、schemaless 数据写入、参数绑定接口数据写入等功能 `TDengine.Connector` 目前暂未提供 REST 连接方式,用户可以参考 [RESTful APIs](https://docs.taosdata.com//reference/restful-api/) 文档自行编写。 +`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、订阅、schemaless 数据写入、参数绑定接口数据写入等功能 `TDengine.Connector` 目前暂未提供 REST 连接方式,用户可以参考 [REST API](/reference/rest-api/) 文档自行编写。 
本文介绍如何在 Linux 或 Windows 环境中安装 `TDengine.Connector`,并通过 `TDengine.Connector` 连接 TDengine 集群,进行数据写入、查询等基本操作。 diff --git a/docs-cn/14-reference/03-connector/java.mdx b/docs-cn/14-reference/03-connector/java.mdx index 55abf84fd50fe1c4b5b6a07b28731a00d4534a05..267757160634b28ab198ae0fd759188cf4ccc5cc 100644 --- a/docs-cn/14-reference/03-connector/java.mdx +++ b/docs-cn/14-reference/03-connector/java.mdx @@ -11,7 +11,7 @@ import TabItem from '@theme/TabItem'; `taos-jdbcdriver` 是 TDengine 的官方 Java 语言连接器,Java 开发人员可以通过它开发存取 TDengine 数据库的应用软件。`taos-jdbcdriver` 实现了 JDBC driver 标准的接口,并提供两种形式的连接器。一种是通过 TDengine 客户端驱动程序(taosc)原生连接 TDengine 实例,支持数据写入、查询、订阅、schemaless 接口和参数绑定接口等功能,一种是通过 taosAdapter 提供的 REST 接口连接 TDengine 实例(2.4.0.0 及更高版本)。REST 连接实现的功能集合和原生连接有少量不同。 -![tdengine-connector](tdengine-jdbc-connector.png) +![TDengine Database Connector Java](tdengine-jdbc-connector.webp) 上图显示了两种 Java 应用使用连接器访问 TDengine 的两种方式: @@ -208,10 +208,10 @@ url 中的配置参数如下: - 与原生连接方式不同,REST 接口是无状态的。在使用 JDBC REST 连接时,需要在 SQL 中指定表、超级表的数据库名称。例如: ```sql -INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6); +INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6); ``` -- 从 taos-jdbcdriver-2.0.36 和 TDengine 2.2.0.0 版本开始,如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('beijing') values(now, 24.6); +- 从 taos-jdbcdriver-2.0.36 和 TDengine 2.2.0.0 版本开始,如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6); ::: @@ -563,7 +563,7 @@ public class ParameterBindingDemo { // set table name pstmt.setTableName("t5_" + i); // set tags - pstmt.setTagNString(0, "北京-abc"); + pstmt.setTagNString(0, "California.SanFrancisco"); // set columns ArrayList tsList = new ArrayList<>(); @@ -574,7 +574,7 @@ public class ParameterBindingDemo { ArrayList f1List = new ArrayList<>(); for (int j = 0; j < numOfRow; j++) { - f1List.add("北京-abc"); + f1List.add("California.LosAngeles"); } pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE); @@ -633,7 +633,7 @@ public class SchemalessInsertTest { private static final String host = "127.0.0.1"; private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000"; private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0"; - private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1346846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"Beijing\", \"id\": \"d1001\"}}"; + private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1346846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"; public static void main(String[] args) throws SQLException { final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; diff --git a/docs-cn/14-reference/03-connector/node.mdx b/docs-cn/14-reference/03-connector/node.mdx index 12345fa9fe995c41828df07703f0efb61a2e029d..9f2bed9e97cb33aeabfce3d69dc3774931b426c0 100644 --- a/docs-cn/14-reference/03-connector/node.mdx +++ b/docs-cn/14-reference/03-connector/node.mdx @@ -14,7 +14,6 @@ import NodeInfluxLine from 
"../../07-develop/03-insert-data/_js_line.mdx"; import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx"; import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx"; import NodeQuery from "../../07-develop/04-query-data/_js.mdx"; -import NodeAsyncQuery from "../../07-develop/04-query-data/_js_async.mdx"; `td2.0-connector` 和 `td2.0-rest-connector` 是 TDengine 的官方 Node.js 语言连接器。Node.js 开发人员可以通过它开发可以存取 TDengine 集群数据的应用软件。 @@ -189,14 +188,8 @@ let cursor = conn.cursor(); ### 查询数据 -#### 同步查询 - -#### 异步查询 - - - ## 更多示例程序 | 示例程序 | 示例程序描述 | diff --git a/docs-cn/14-reference/03-connector/tdengine-jdbc-connector.png b/docs-cn/14-reference/03-connector/tdengine-jdbc-connector.png deleted file mode 100644 index 1cb8401ea30b01d8db652ed4ea70ecc511de7461..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/03-connector/tdengine-jdbc-connector.png and /dev/null differ diff --git a/docs-cn/14-reference/03-connector/tdengine-jdbc-connector.webp b/docs-cn/14-reference/03-connector/tdengine-jdbc-connector.webp new file mode 100644 index 0000000000000000000000000000000000000000..0956d6005ffc5e90727d49d7566158affdda09c2 Binary files /dev/null and b/docs-cn/14-reference/03-connector/tdengine-jdbc-connector.webp differ diff --git a/docs-cn/14-reference/04-taosadapter.md b/docs-cn/14-reference/04-taosadapter.md index 90a31ec94c94559311e2c91cd34f75af7e87e9a0..6e259391d40acfd48d8db8db3246ad2196ce0520 100644 --- a/docs-cn/14-reference/04-taosadapter.md +++ b/docs-cn/14-reference/04-taosadapter.md @@ -24,7 +24,7 @@ taosAdapter 提供以下功能: ## taosAdapter 架构图 -![taosAdapter Architecture](taosAdapter-architecture.png) +![TDengine Database taosAdapter Architecture](taosAdapter-architecture.webp) ## taosAdapter 部署方法 diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.png deleted file mode 100644 index 4708f836feb21980f2db7fed4a55f799b23a6ec1..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp new file mode 100644 index 0000000000000000000000000000000000000000..a78e18028a94c2f6a783b08d992a25c791527407 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.png deleted file mode 100644 index f2684e6eed70e8f56697eae42b495d6bd62815e8..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp new file mode 100644 index 0000000000000000000000000000000000000000..b152418d0902b8ebdf62ebce6705c10dd5ab4fbf Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.png deleted file mode 100644 index 74686691e4106b8646c3deee1e0ce73b2f53f1ea..0000000000000000000000000000000000000000 Binary files 
a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp new file mode 100644 index 0000000000000000000000000000000000000000..f58f48b7f17375cb8e62e7c0126ca3aea56a13f6 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-4-requests.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-4-requests.png deleted file mode 100644 index 27964215567f9f961c0aeaf1b863188437008fb7..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-4-requests.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp new file mode 100644 index 0000000000000000000000000000000000000000..00afcce013602dce0da17bfd033f65aaa8e43bb7 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-5-database.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-5-database.png deleted file mode 100644 index b0d3abbf21ec4d4bd7bfb95fcc03a5f936b22665..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-5-database.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-5-database.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-5-database.webp new file mode 100644 index 0000000000000000000000000000000000000000..567e5694f9d7a035a3eb354493d3df8ed64db251 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-5-database.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.png deleted file mode 100644 index 2b54cbeb83bcff12f20461a4f57f882e2073f231..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp new file mode 100644 index 0000000000000000000000000000000000000000..cc8a912810f35e53a6e5fa96ea0c81e334ffc0df Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-7-login-history.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-7-login-history.png deleted file mode 100644 index eb3848657f13900c856ac595c20766465157e9c4..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-7-login-history.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp new file mode 100644 index 0000000000000000000000000000000000000000..651b716bc511ba2ed5db5e6fc6b0591ef150cbf6 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.png deleted 
file mode 100644 index d94b2e02ac9855bb3d2f77d8902e068839db364f..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp new file mode 100644 index 0000000000000000000000000000000000000000..8666193f59497180574fd2786266e5baabbe9761 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-full.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-full.png deleted file mode 100644 index 654df2934597ce600a1dc2dcd0cab7e29de7076d..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-full.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-full.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-full.webp new file mode 100644 index 0000000000000000000000000000000000000000..7f38a76a2b899ffebc7aecd39c8ec4fd0b2da778 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-full.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-manager-status.png b/docs-cn/14-reference/07-tdinsight/assets/alert-manager-status.png deleted file mode 100644 index e3afa22c0326d70567ec4529c83101c746daac87..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/alert-manager-status.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-manager-status.webp b/docs-cn/14-reference/07-tdinsight/assets/alert-manager-status.webp new file mode 100644 index 0000000000000000000000000000000000000000..3d7fe932a23f3720e76e4217a7b5d1868d81fac8 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/alert-manager-status.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-notification-channel.png b/docs-cn/14-reference/07-tdinsight/assets/alert-notification-channel.png deleted file mode 100644 index 198bf37141c86a66cdd91b47a331bcdeb83daaf8..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/alert-notification-channel.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-notification-channel.webp b/docs-cn/14-reference/07-tdinsight/assets/alert-notification-channel.webp new file mode 100644 index 0000000000000000000000000000000000000000..517123954efe4b94485fdab2e07be0d765f5daa2 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/alert-notification-channel.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-query-demo.png b/docs-cn/14-reference/07-tdinsight/assets/alert-query-demo.png deleted file mode 100644 index ace3aa3c2f8f14fabdac54bc25ae2d9449445b69..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/alert-query-demo.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-query-demo.webp b/docs-cn/14-reference/07-tdinsight/assets/alert-query-demo.webp new file mode 100644 index 0000000000000000000000000000000000000000..6666296ac16e7a0c0ab3db23f0517f2089d09035 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/alert-query-demo.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.png 
b/docs-cn/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.png deleted file mode 100644 index 7082e49f6beb8690c36f98a3f4ff2befdb8fd014..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp b/docs-cn/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp new file mode 100644 index 0000000000000000000000000000000000000000..6f74bc3a47a32de661ef25f787a947d823715810 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-rule-test.png b/docs-cn/14-reference/07-tdinsight/assets/alert-rule-test.png deleted file mode 100644 index ffd4911b53854c42dbf0ff11838cb604fa694138..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/alert-rule-test.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-rule-test.webp b/docs-cn/14-reference/07-tdinsight/assets/alert-rule-test.webp new file mode 100644 index 0000000000000000000000000000000000000000..acda3b24a6263815ac8b658709d2172300ca3b00 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/alert-rule-test.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-button.png b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-button.png deleted file mode 100644 index 802c7366f921301bd7fbc62458e56b2d1eaf195c..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-button.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp new file mode 100644 index 0000000000000000000000000000000000000000..903e236e2a776dfef7f85c014662e8913a9033a5 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.png b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.png deleted file mode 100644 index 019ec921b6f808671f4f864ddf3380159d4a0dcc..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp new file mode 100644 index 0000000000000000000000000000000000000000..14fcfe9d183e8804199708ae4492d0904a7c9d62 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-test.png b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-test.png deleted file mode 100644 index 3963abb4ea8ae0e6f5557466f7a5b746c2d2ea3c..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-test.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp new file mode 100644 index 
0000000000000000000000000000000000000000..00b50cc619b030d1fb2be3a367183901d5c833e8 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource.png b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource.png deleted file mode 100644 index 837100464b35a5cafac474723aef603f91945ebc..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource.webp b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource.webp new file mode 100644 index 0000000000000000000000000000000000000000..06d0ff6ed50091a6340508bc5b2b3f78b65dcb18 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-display.png b/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-display.png deleted file mode 100644 index 98223df25499effac343ff5723544a3c289f18fa..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-display.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-display.webp b/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-display.webp new file mode 100644 index 0000000000000000000000000000000000000000..e2ec052b91e439a817f6e88b8afd0fcb4dcb7ef8 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-display.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-import-options.png b/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-import-options.png deleted file mode 100644 index 07aba348f02b4fb8ef68e79664920c119b842d4c..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-import-options.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp b/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp new file mode 100644 index 0000000000000000000000000000000000000000..665c035f9755b9472aee33cd61d3ab52831194b5 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-import-dashboard.png b/docs-cn/14-reference/07-tdinsight/assets/howto-import-dashboard.png deleted file mode 100644 index 7e28939ead8bf3b6e2b4330e4f9b59c2e39b5c1c..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/howto-import-dashboard.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-import-dashboard.webp b/docs-cn/14-reference/07-tdinsight/assets/howto-import-dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..7dc42eeba919fee7b438a453c00bb9fd0ac2d274 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/howto-import-dashboard.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-15167.png b/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-15167.png deleted file mode 100644 index 981f640b14d18aa6f0682768d8405a232df500f6..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-15167.png and /dev/null differ diff 
--git a/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-15167.webp b/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-15167.webp new file mode 100644 index 0000000000000000000000000000000000000000..7ef081900f8de99c859193b69d49b3d6bc187909 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-15167.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.png b/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.png deleted file mode 100644 index 94ef4fa5fe63e535118a81707b413c028ce01f70..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp b/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp new file mode 100644 index 0000000000000000000000000000000000000000..602452fc4c89424d8e17d46d74949b69be84dbe8 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.png b/docs-cn/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.png deleted file mode 100644 index 670cacc377c2801fa9437c3c132c5c7fbc361b0f..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp b/docs-cn/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp new file mode 100644 index 0000000000000000000000000000000000000000..35a3ebba781f24dbb0066993d1ca2f02659997d2 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/import_dashboard.png b/docs-cn/14-reference/07-tdinsight/assets/import_dashboard.png deleted file mode 100644 index d74cd36c96ee0fd24ddc6feae2da07824816f745..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/import_dashboard.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/import_dashboard.webp b/docs-cn/14-reference/07-tdinsight/assets/import_dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..fb7958f1b9fbd43c8f63136024842790e711c490 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/import_dashboard.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/tdengine_dashboard.png b/docs-cn/14-reference/07-tdinsight/assets/tdengine_dashboard.png deleted file mode 100644 index 0101e7430cb2ef673818de8bd3af53d0d082ad3f..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/tdengine_dashboard.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/tdengine_dashboard.webp b/docs-cn/14-reference/07-tdinsight/assets/tdengine_dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..49f1d88f4ad93286cd8582536e82b4dcc4ff271b Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/tdengine_dashboard.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/index.md b/docs-cn/14-reference/07-tdinsight/index.md index a554d7ee6b36797940282fa8401df2f22c4cf579..5990a831b8bc1788deaddfb38f717f2723969362 100644 --- 
a/docs-cn/14-reference/07-tdinsight/index.md +++ b/docs-cn/14-reference/07-tdinsight/index.md @@ -233,33 +233,33 @@ sudo systemctl enable grafana-server 指向 **Configurations** -> **Data Sources** 菜单,然后点击 **Add data source** 按钮。 -![添加数据源按钮](./assets/howto-add-datasource-button.png) +![TDengine Database TDinsight 添加数据源按钮](./assets/howto-add-datasource-button.webp) 搜索并选择**TDengine**。 -![添加数据源](./assets/howto-add-datasource-tdengine.png) +![TDengine Database TDinsight 添加数据源](./assets/howto-add-datasource-tdengine.webp) 配置 TDengine 数据源。 -![数据源配置](./assets/howto-add-datasource.png) +![TDengine Database TDinsight 数据源配置](./assets/howto-add-datasource.webp) 保存并测试,正常情况下会报告 'TDengine Data source is working'。 -![数据源测试](./assets/howto-add-datasource-test.png) +![TDengine Database TDinsight 数据源测试](./assets/howto-add-datasource-test.webp) ### 导入仪表盘 指向 **+** / **Create** - **import**(或 `/dashboard/import` url)。 -![导入仪表盘和配置](./assets/import_dashboard.png) +![TDengine Database TDinsight 导入仪表盘和配置](./assets/import_dashboard.webp) 在 **Import via grafana.com** 位置键入仪表盘 ID `15167` 并 **Load**。 -![通过 grafana.com 导入](./assets/import-dashboard-15167.png) +![通过 grafana.com 导入](./assets/import-dashboard-15167.webp) 导入完成后,TDinsight 的完整页面视图如下所示。 -![显示](./assets/TDinsight-full.png) +![TDengine Database TDinsight 显示](./assets/TDinsight-full.webp) ## TDinsight 仪表盘详细信息 @@ -269,7 +269,7 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes ### 集群状态 -![tdinsight-mnodes-overview](./assets/TDinsight-1-cluster-status.png) +![TDengine Database TDinsight mnodes overview](./assets/TDinsight-1-cluster-status.webp) 这部分包括集群当前信息和状态,告警信息也在此处(从左到右,从上到下)。 @@ -289,7 +289,7 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes ### DNodes 状态 -![tdinsight-mnodes-overview](./assets/TDinsight-2-dnodes.png) +![TDengine Database TDinsight mnodes overview](./assets/TDinsight-2-dnodes.webp) - **DNodes Status**:`show dnodes` 的简单表格视图。 - **DNodes Lifetime**:从创建 dnode 开始经过的时间。 @@ -298,14 +298,14 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes ### MNode 概述 -![tdinsight-mnodes-overview](./assets/TDinsight-3-mnodes.png) +![TDengine Database TDinsight mnodes overview](./assets/TDinsight-3-mnodes.webp) 1. **MNodes Status**:`show mnodes` 的简单表格视图。 2. **MNodes Number**:类似于`DNodes Number`,MNodes 数量变化。 ### 请求 -![tdinsight-requests](./assets/TDinsight-4-requests.png) +![TDengine Database TDinsight requests](./assets/TDinsight-4-requests.webp) 1. **Requests Rate(Inserts per Second)**:平均每秒插入次数。 2. 
**Requests (Selects)**:查询请求数及变化率(count of second)。 @@ -313,7 +313,7 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes ### 数据库 -![tdinsight-database](./assets/TDinsight-5-database.png) +![TDengine Database TDinsight database](./assets/TDinsight-5-database.webp) 数据库使用情况,对变量 `$database` 的每个值即每个数据库进行重复多行展示。 @@ -325,7 +325,7 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes ### DNode 资源使用情况 -![dnode-usage](./assets/TDinsight-6-dnode-usage.png) +![TDengine Database TDinsight dnode-usage](./assets/TDinsight-6-dnode-usage.webp) 数据节点资源使用情况展示,对变量 `$fqdn` 即每个数据节点进行重复多行展示。包括: @@ -346,13 +346,13 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes ### 登录历史 -![登录历史](./assets/TDinsight-7-login-history.png) +![TDengine Database TDinsight 登录历史](./assets/TDinsight-7-login-history.webp) 目前只报告每分钟登录次数。 ### 监控 taosAdapter -![taosadapter](./assets/TDinsight-8-taosadapter.png) +![TDengine Database TDinsight monitor taosadapter](./assets/TDinsight-8-taosadapter.webp) 支持监控 taosAdapter 请求统计和状态详情。包括: diff --git a/docs-cn/14-reference/12-config/index.md b/docs-cn/14-reference/12-config/index.md index cbb3833b5bb170720c2aa7bea6687a50feeae7d5..89c414a5b8479d8253b2a1fa1e3ab3b684f75e78 100644 --- a/docs-cn/14-reference/12-config/index.md +++ b/docs-cn/14-reference/12-config/index.md @@ -80,7 +80,7 @@ taos --dump-config | 补充说明 | RESTful 服务在 2.4.0.0 之前(不含)由 taosd 提供,默认端口为 6041; 在 2.4.0.0 及后续版本由 taosAdapter,默认端口为 6041 | :::note -对于端口,TDengine 会使用从 serverPort 起 13 个连续的 TCP 和 UDP 端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从 6030 到 6042 共 13 个端口,而且必须 TCP 和 UDP 都打开。(详细的端口情况请参见下表) +确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。(详细的端口情况请参见下表) ::: | 协议 | 默认端口 | 用途说明 | 修改方法 | | :--- | :-------- | :---------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------- | @@ -590,7 +590,7 @@ charset 的有效值是 UTF-8。 | 适用范围 | 仅服务端适用 | | 含义 | 每个 DB 中 能够使用的最大 vnode 个数 | | 取值范围 | 0-8192 | -| 缺省值 | | +| 缺省值 | 0 | ### maxTablesPerVnode diff --git a/docs-cn/14-reference/13-schemaless/13-schemaless.md b/docs-cn/14-reference/13-schemaless/13-schemaless.md index 4de310c248d7763690acef80cdca1c50f609d63b..f2712f2814593bddd65401cb129c8c58ee55a316 100644 --- a/docs-cn/14-reference/13-schemaless/13-schemaless.md +++ b/docs-cn/14-reference/13-schemaless/13-schemaless.md @@ -82,7 +82,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 :::tip 无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过 -16k 字节。这方面的具体限制约束请参见 [TAOS SQL 边界限制](/taos-sql/limit) +48KB。这方面的具体限制约束请参见 [TAOS SQL 边界限制](/taos-sql/limit) ::: diff --git a/docs-cn/14-reference/taosAdapter-architecture.png b/docs-cn/14-reference/taosAdapter-architecture.png deleted file mode 100644 index 08a9018553aae6f86b42d127b372d0cecfa9bdf8..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/taosAdapter-architecture.png and /dev/null differ diff --git a/docs-cn/14-reference/taosAdapter-architecture.webp b/docs-cn/14-reference/taosAdapter-architecture.webp new file mode 100644 index 0000000000000000000000000000000000000000..a4162b0a037c06d34191784716c51080b9f8a570 Binary files /dev/null and b/docs-cn/14-reference/taosAdapter-architecture.webp differ diff --git a/docs-cn/20-third-party/01-grafana.mdx b/docs-cn/20-third-party/01-grafana.mdx index 9a4c33d8aceb086ff8ba8dca0f38b1bcbf762005..328bd6bb4595a6d205cff45539d69e868d33d488 100644 --- a/docs-cn/20-third-party/01-grafana.mdx +++ b/docs-cn/20-third-party/01-grafana.mdx @@ -64,15 +64,15 @@ 
GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource 用户可以直接通过 http://localhost:3000 的网址,登录 Grafana 服务器(用户名/密码:admin/admin),通过左侧 `Configuration -> Data Sources` 可以添加数据源,如下图所示: -![img](/img/connections/add_datasource1.jpg) +![TDengine Database Grafana plugin add data source](./add_datasource1.webp) 点击 `Add data source` 可进入新增数据源页面,在查询框中输入 TDengine 可选择添加,如下图所示: -![img](/img/connections/add_datasource2.jpg) +![TDengine Database Grafana plugin add data source](./add_datasource2.webp) 进入数据源配置页面,按照默认提示修改相应配置即可: -![img](/img/connections/add_datasource3.jpg) +![TDengine Database Grafana plugin add data source](./add_datasource3.webp) - Host: TDengine 集群中提供 REST 服务 (在 2.4 之前由 taosd 提供, 从 2.4 开始由 taosAdapter 提供)的组件所在服务器的 IP 地址与 TDengine REST 服务的端口号(6041),默认 http://localhost:6041。 - User:TDengine 用户名。 @@ -80,13 +80,13 @@ GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource 点击 `Save & Test` 进行测试,成功会有如下提示: -![img](/img/connections/add_datasource4.jpg) +![TDengine Database Grafana plugin add data source](./add_datasource4.webp) ### 创建 Dashboard 回到主界面创建 Dashboard,点击 Add Query 进入面板查询页面: -![img](/img/connections/create_dashboard1.jpg) +![TDengine Database Grafana plugin create dashboard](./create_dashboard1.webp) 如上图所示,在 Query 中选中 `TDengine` 数据源,在下方查询框可输入相应 SQL 进行查询,具体说明如下: @@ -96,7 +96,7 @@ GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource 按照默认提示查询当前 TDengine 部署所在服务器指定间隔系统内存平均使用量如下: -![img](/img/connections/create_dashboard2.jpg) +![TDengine Database Grafana plugin create dashboard](./create_dashboard2.webp) > 关于如何使用 Grafana 创建相应的监测界面以及更多有关使用 Grafana 的信息,请参考 Grafana 官方的[文档](https://grafana.com/docs/)。 diff --git a/docs-cn/20-third-party/09-emq-broker.md b/docs-cn/20-third-party/09-emq-broker.md index f57ccb20e6517c51b55093d11fa767bef7d0c9a8..833fa97e2e5f9f138718e18bb16aa3e65abca8cc 100644 --- a/docs-cn/20-third-party/09-emq-broker.md +++ b/docs-cn/20-third-party/09-emq-broker.md @@ -45,25 +45,25 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em 使用浏览器打开网址 http://IP:18083 并登录 EMQX Dashboard。初次安装用户名为 `admin` 密码为:`public` -![img](./emqx/login-dashboard.png) +![TDengine Database EMQX login dashboard](./emqx/login-dashboard.webp) ### 创建规则(Rule) 选择左侧“规则引擎(Rule Engine)”中的“规则(Rule)”并点击“创建(Create)”按钮: -![img](./emqx/rule-engine.png) +![TDengine Database EMQX rule engine](./emqx/rule-engine.webp) ### 编辑 SQL 字段 -![img](./emqx/create-rule.png) +![TDengine Database EMQX create rule](./emqx/create-rule.webp) ### 新增“动作(action handler)” -![img](./emqx/add-action-handler.png) +![TDengine Database EMQX](./emqx/add-action-handler.webp) ### 新增“资源(Resource)” -![img](./emqx/create-resource.png) +![TDengine Database EMQX create resource](./emqx/create-resource.webp) 选择“发送数据到 Web 服务“并点击“新建资源”按钮: @@ -71,13 +71,13 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em 选择“发送数据到 Web 服务“并填写 请求 URL 为 运行 taosAdapter 的服务器地址和端口(默认为 6041)。其他属性请保持默认值。 -![img](./emqx/edit-resource.png) +![TDengine Database EMQX edit resource](./emqx/edit-resource.webp) ### 编辑“动作(action)” 编辑资源配置,增加 Authorization 认证的键/值配对项,相关文档请参考[ TDengine REST API 文档](https://docs.taosdata.com/reference/rest-api/)。在消息体中输入规则引擎替换模板。 -![img](./emqx/edit-action.png) +![TDengine Database EMQX edit action](./emqx/edit-action.webp) ## 编写模拟测试程序 @@ -164,7 +164,7 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em 注意:代码中 CLIENT_NUM 在开始测试中可以先设置一个较小的值,避免硬件性能不能完全处理较大并发客户端数量。 -![img](./emqx/client-num.png) +![TDengine Database EMQX client num](./emqx/client-num.webp) ## 执行测试模拟发送 MQTT 数据 @@ -173,19 +173,19 @@ npm install mqtt mockjs --save 
--registry=https://registry.npm.taobao.org node mock.js ``` -![img](./emqx/run-mock.png) +![TDengine Database EMQX run-mock](./emqx/run-mock.webp) ## 验证 EMQX 接收到数据 在 EMQX Dashboard 规则引擎界面进行刷新,可以看到有多少条记录被正确接收到: -![img](./emqx/check-rule-matched.png) +![TDengine Database EMQX rule matched](./emqx/check-rule-matched.webp) ## 验证数据写入到 TDengine 使用 TDengine CLI 程序登录并查询相应数据库和表,验证数据是否被正确写入到 TDengine 中: -![img](./emqx/check-result-in-taos.png) +![TDengine Database EMQX result in taos](./emqx/check-result-in-taos.webp) TDengine 详细使用方法请参考 [TDengine 官方文档](https://docs.taosdata.com/)。 EMQX 详细使用方法请参考 [EMQX 官方文档](https://www.emqx.io/docs/zh/v4.4/rule/rule-engine.html)。 diff --git a/docs-cn/20-third-party/11-kafka.md b/docs-cn/20-third-party/11-kafka.md index d12d5fab75671d8a1e7356e766d0e8979c6519c2..0de5b43a396fa3bb4aba558308f95cb0d6f96bc5 100644 --- a/docs-cn/20-third-party/11-kafka.md +++ b/docs-cn/20-third-party/11-kafka.md @@ -9,11 +9,11 @@ TDengine Kafka Connector 包含两个插件: TDengine Source Connector 和 TDeng Kafka Connect 是 Apache Kafka 的一个组件,用于使其它系统,比如数据库、云服务、文件系统等能方便地连接到 Kafka。数据既可以通过 Kafka Connect 从其它系统流向 Kafka, 也可以通过 Kafka Connect 从 Kafka 流向其它系统。从其它系统读数据的插件称为 Source Connector, 写数据到其它系统的插件称为 Sink Connector。Source Connector 和 Sink Connector 都不会直接连接 Kafka Broker,Source Connector 把数据转交给 Kafka Connect。Sink Connector 从 Kafka Connect 接收数据。 -![](kafka/Kafka_Connect.png) +![TDengine Database Kafka Connector -- Kafka Connect structure](kafka/Kafka_Connect.webp) TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送给 Kafka Connect。TDengine Sink Connector 用于 从 Kafka Connect 接收数据并写入 TDengine。 -![](kafka/streaming-integration-with-kafka-connect.png) +![TDengine Database Kafka Connector -- streaming integration with kafka connect](kafka/streaming-integration-with-kafka-connect.webp) ## 什么是 Confluent? @@ -26,7 +26,7 @@ Confluent 在 Kafka 的基础上增加很多扩展功能。包括: 5. 管理和监控 Kafka 的 GUI —— Confluent 控制中心 这些扩展功能有的包含在社区版本的 Confluent 中,有的只有企业版能用。 -![](kafka/confluentPlatform.png) +![TDengine Database Kafka Connector -- Confluent introduction](kafka/confluentPlatform.webp) Confluent 企业版提供了 `confluent` 命令行工具管理各个组件。 @@ -196,10 +196,10 @@ confluent local services connect connector load TDengineSinkConnector --config . 准备测试数据的文本文件,内容如下: ```txt title="test-data.txt" -meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000 -meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250000000 -meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249000000 -meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250000000 +meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000 +meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250000000 +meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249000000 +meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250000000 ``` 使用 kafka-console-producer 向主题 meters 添加测试数据。 @@ -223,10 +223,10 @@ Database changed. 
taos> select * from meters; ts | current | voltage | phase | groupid | location | =============================================================================================================================================================== - 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | Beijing.Haidian | - 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | Beijing.Haidian | - 2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | Beijing.Haidian | - 2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | Beijing.Haidian | + 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles | + 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles | + 2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | California.LosAngeles | + 2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | California.LosAngeles | Query OK, 4 row(s) in set (0.004208s) ``` @@ -275,7 +275,7 @@ DROP DATABASE IF EXISTS test; CREATE DATABASE test; USE test; CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT); -INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000); +INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000); ``` 使用 TDengine CLI, 执行 SQL 文件。 @@ -302,8 +302,8 @@ kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topi ``` ...... 
-meters,location="beijing.chaoyang",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000 -meters,location="beijing.chaoyang",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000 +meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000 +meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000 ...... ``` diff --git a/docs-cn/20-third-party/add_datasource1.webp b/docs-cn/20-third-party/add_datasource1.webp new file mode 100644 index 0000000000000000000000000000000000000000..211edc4457abd0db6b0ef64636d61d65b5f43db6 Binary files /dev/null and b/docs-cn/20-third-party/add_datasource1.webp differ diff --git a/docs-cn/20-third-party/add_datasource2.webp b/docs-cn/20-third-party/add_datasource2.webp new file mode 100644 index 0000000000000000000000000000000000000000..8ab547231fee4d3b0874fcfe08c0ce152b0c53a1 Binary files /dev/null and b/docs-cn/20-third-party/add_datasource2.webp differ diff --git a/docs-cn/20-third-party/add_datasource3.webp b/docs-cn/20-third-party/add_datasource3.webp new file mode 100644 index 0000000000000000000000000000000000000000..d8a733360a09b4425c571f254a9ecb298c04b72f Binary files /dev/null and b/docs-cn/20-third-party/add_datasource3.webp differ diff --git a/docs-cn/20-third-party/add_datasource4.webp b/docs-cn/20-third-party/add_datasource4.webp new file mode 100644 index 0000000000000000000000000000000000000000..b1e0fc6e2b27df4af1bb5ad92756bcb5d4fda63e Binary files /dev/null and b/docs-cn/20-third-party/add_datasource4.webp differ diff --git a/docs-cn/20-third-party/create_dashboard1.webp b/docs-cn/20-third-party/create_dashboard1.webp new file mode 100644 index 0000000000000000000000000000000000000000..55eb388833e4df2a46f4d1cf6d346aa11429385d Binary files /dev/null and b/docs-cn/20-third-party/create_dashboard1.webp differ diff --git a/docs-cn/20-third-party/create_dashboard2.webp b/docs-cn/20-third-party/create_dashboard2.webp new file mode 100644 index 0000000000000000000000000000000000000000..bb40e407187718c52e9f617d8ebd3d25fd14b56b Binary files /dev/null and b/docs-cn/20-third-party/create_dashboard2.webp differ diff --git a/docs-cn/20-third-party/dashboard-15146.webp b/docs-cn/20-third-party/dashboard-15146.webp new file mode 100644 index 0000000000000000000000000000000000000000..fae586f5c74317621002416b2824830a7bdf3982 Binary files /dev/null and b/docs-cn/20-third-party/dashboard-15146.webp differ diff --git a/docs-cn/20-third-party/emqx/add-action-handler.png b/docs-cn/20-third-party/emqx/add-action-handler.png deleted file mode 100644 index 97a1f933ecfadfcab399938806d73c5a5ecc6427..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/add-action-handler.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/add-action-handler.webp b/docs-cn/20-third-party/emqx/add-action-handler.webp new file mode 100644 index 0000000000000000000000000000000000000000..4a8d105f711991226cfbd43b6e9ab07d7ccc686a Binary files /dev/null and b/docs-cn/20-third-party/emqx/add-action-handler.webp differ diff --git a/docs-cn/20-third-party/emqx/check-result-in-taos.png b/docs-cn/20-third-party/emqx/check-result-in-taos.png deleted file mode 100644 index c17a5c1ea2b9bbd49263056c8bf09c9aabab07d5..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/check-result-in-taos.png and /dev/null differ diff --git 
a/docs-cn/20-third-party/emqx/check-result-in-taos.webp b/docs-cn/20-third-party/emqx/check-result-in-taos.webp new file mode 100644 index 0000000000000000000000000000000000000000..8fa040a86104fece02ddaf8986f0a67de316143d Binary files /dev/null and b/docs-cn/20-third-party/emqx/check-result-in-taos.webp differ diff --git a/docs-cn/20-third-party/emqx/check-rule-matched.png b/docs-cn/20-third-party/emqx/check-rule-matched.png deleted file mode 100644 index 9e9a466946a1afa857e2bbc07b14956dd0f984b6..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/check-rule-matched.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/check-rule-matched.webp b/docs-cn/20-third-party/emqx/check-rule-matched.webp new file mode 100644 index 0000000000000000000000000000000000000000..e5a614035739df859b27c817f3b9f41be444b513 Binary files /dev/null and b/docs-cn/20-third-party/emqx/check-rule-matched.webp differ diff --git a/docs-cn/20-third-party/emqx/client-num.png b/docs-cn/20-third-party/emqx/client-num.png deleted file mode 100644 index fff48cbf3b271c367079ddde425b3f9b014062f7..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/client-num.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/client-num.webp b/docs-cn/20-third-party/emqx/client-num.webp new file mode 100644 index 0000000000000000000000000000000000000000..a151b184843607d67b649babb3145bfb3e329cda Binary files /dev/null and b/docs-cn/20-third-party/emqx/client-num.webp differ diff --git a/docs-cn/20-third-party/emqx/create-resource.png b/docs-cn/20-third-party/emqx/create-resource.png deleted file mode 100644 index 58da4c391a3692b9f5fa348d952701eab8bcb746..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/create-resource.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/create-resource.webp b/docs-cn/20-third-party/emqx/create-resource.webp new file mode 100644 index 0000000000000000000000000000000000000000..bf9cccbe49c57f925c5e6b094a4c0d88a64242cb Binary files /dev/null and b/docs-cn/20-third-party/emqx/create-resource.webp differ diff --git a/docs-cn/20-third-party/emqx/create-rule.png b/docs-cn/20-third-party/emqx/create-rule.png deleted file mode 100644 index 73b0b6ee3e6065a142df98abe8c0dbb32b34f89d..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/create-rule.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/create-rule.webp b/docs-cn/20-third-party/emqx/create-rule.webp new file mode 100644 index 0000000000000000000000000000000000000000..13e8fc83d48d2fd9d0a303c707ef3024d3ee5203 Binary files /dev/null and b/docs-cn/20-third-party/emqx/create-rule.webp differ diff --git a/docs-cn/20-third-party/emqx/edit-action.png b/docs-cn/20-third-party/emqx/edit-action.png deleted file mode 100644 index 2a43ee369a439cf11cee23c11f25d6a84b26d7dc..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/edit-action.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/edit-action.webp b/docs-cn/20-third-party/emqx/edit-action.webp new file mode 100644 index 0000000000000000000000000000000000000000..7f6d2e36a82b1917930e5d3969115db9359674a0 Binary files /dev/null and b/docs-cn/20-third-party/emqx/edit-action.webp differ diff --git a/docs-cn/20-third-party/emqx/edit-resource.png b/docs-cn/20-third-party/emqx/edit-resource.png deleted file mode 100644 index 
0a0b3560044f4ed6e0a8f040b74085a7e8948b1f..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/edit-resource.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/edit-resource.webp b/docs-cn/20-third-party/emqx/edit-resource.webp new file mode 100644 index 0000000000000000000000000000000000000000..fd5d278fab16bba4e04e1c348d4086dce77abb98 Binary files /dev/null and b/docs-cn/20-third-party/emqx/edit-resource.webp differ diff --git a/docs-cn/20-third-party/emqx/login-dashboard.png b/docs-cn/20-third-party/emqx/login-dashboard.png deleted file mode 100644 index d6c5035c98d860faf639ef6611c6719adf80c47b..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/login-dashboard.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/login-dashboard.webp b/docs-cn/20-third-party/emqx/login-dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..f84cee668fb6efe1586515ba0dee3ae2f10a5b30 Binary files /dev/null and b/docs-cn/20-third-party/emqx/login-dashboard.webp differ diff --git a/docs-cn/20-third-party/emqx/rule-engine.png b/docs-cn/20-third-party/emqx/rule-engine.png deleted file mode 100644 index db110a837b024c82ee9d22f02dcd3a9d06abdd55..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/rule-engine.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/rule-engine.webp b/docs-cn/20-third-party/emqx/rule-engine.webp new file mode 100644 index 0000000000000000000000000000000000000000..c1711c8cc757cd73fef5cb941a1818756241f7f0 Binary files /dev/null and b/docs-cn/20-third-party/emqx/rule-engine.webp differ diff --git a/docs-cn/20-third-party/emqx/rule-header-key-value.png b/docs-cn/20-third-party/emqx/rule-header-key-value.png deleted file mode 100644 index b81b9a9684aa2f98d00b7ec21e5de411fb450312..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/rule-header-key-value.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/rule-header-key-value.webp b/docs-cn/20-third-party/emqx/rule-header-key-value.webp new file mode 100644 index 0000000000000000000000000000000000000000..e645b3822dffec86f4926e78a57eaffa1e7f4d8d Binary files /dev/null and b/docs-cn/20-third-party/emqx/rule-header-key-value.webp differ diff --git a/docs-cn/20-third-party/emqx/run-mock.png b/docs-cn/20-third-party/emqx/run-mock.png deleted file mode 100644 index 0da25818575247732d5d3a783aa52cf7ce24662c..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/run-mock.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/run-mock.webp b/docs-cn/20-third-party/emqx/run-mock.webp new file mode 100644 index 0000000000000000000000000000000000000000..ed33f1666d456f1ab40ed6830af4550d4c7ca037 Binary files /dev/null and b/docs-cn/20-third-party/emqx/run-mock.webp differ diff --git a/docs-cn/20-third-party/import_dashboard1.webp b/docs-cn/20-third-party/import_dashboard1.webp new file mode 100644 index 0000000000000000000000000000000000000000..d4fb374ce8bb75c8a0fbdbb9cab5b30eb29ab06d Binary files /dev/null and b/docs-cn/20-third-party/import_dashboard1.webp differ diff --git a/docs-cn/20-third-party/import_dashboard2.webp b/docs-cn/20-third-party/import_dashboard2.webp new file mode 100644 index 0000000000000000000000000000000000000000..9f74dc96be20ab64b5fb555aaccdaa1c1139b35c Binary files /dev/null and b/docs-cn/20-third-party/import_dashboard2.webp differ diff --git 
a/docs-cn/20-third-party/kafka/Kafka_Connect.png b/docs-cn/20-third-party/kafka/Kafka_Connect.png deleted file mode 100644 index f3dc02ea2a743c6e1ae5531e14f820e3adeca29a..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/kafka/Kafka_Connect.png and /dev/null differ diff --git a/docs-cn/20-third-party/kafka/Kafka_Connect.webp b/docs-cn/20-third-party/kafka/Kafka_Connect.webp new file mode 100644 index 0000000000000000000000000000000000000000..8f2000a749b0a2ccec9939abd144c53c44fbe171 Binary files /dev/null and b/docs-cn/20-third-party/kafka/Kafka_Connect.webp differ diff --git a/docs-cn/20-third-party/kafka/confluentPlatform.png b/docs-cn/20-third-party/kafka/confluentPlatform.png deleted file mode 100644 index f8e69f2c7f64d809996b2d1bf1370b67b8030850..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/kafka/confluentPlatform.png and /dev/null differ diff --git a/docs-cn/20-third-party/kafka/confluentPlatform.webp b/docs-cn/20-third-party/kafka/confluentPlatform.webp new file mode 100644 index 0000000000000000000000000000000000000000..ff03d4e51aaaec85f07ff41ecda0fb9bd6cb2847 Binary files /dev/null and b/docs-cn/20-third-party/kafka/confluentPlatform.webp differ diff --git a/docs-cn/20-third-party/kafka/streaming-integration-with-kafka-connect.png b/docs-cn/20-third-party/kafka/streaming-integration-with-kafka-connect.png deleted file mode 100644 index 26d8a866d706180c900d69bb6f57ca2dff0047dd..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/kafka/streaming-integration-with-kafka-connect.png and /dev/null differ diff --git a/docs-cn/20-third-party/kafka/streaming-integration-with-kafka-connect.webp b/docs-cn/20-third-party/kafka/streaming-integration-with-kafka-connect.webp new file mode 100644 index 0000000000000000000000000000000000000000..120d534ec132cea2ccef6cf87a3ce680a5ac6e9c Binary files /dev/null and b/docs-cn/20-third-party/kafka/streaming-integration-with-kafka-connect.webp differ diff --git a/docs-cn/21-tdinternal/01-arch.md b/docs-cn/21-tdinternal/01-arch.md index 6f479efc1ad13e27899e7819d194a2df59ed3ad1..433cb4808b60ce73c639a23beef45fb8e1afb7dd 100644 --- a/docs-cn/21-tdinternal/01-arch.md +++ b/docs-cn/21-tdinternal/01-arch.md @@ -11,7 +11,7 @@ TDengine 的设计是基于单个硬件、软件系统不可靠,基于任何 TDengine 分布式架构的逻辑结构图如下: -![TDengine架构示意图](/img/architecture/structure.png) +![TDengine Database 架构示意图](./structure.webp)
图 1 TDengine架构示意图
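The next hunk restates the dnode port layout: 13 consecutive TCP/UDP ports starting at serverPort, with the RESTful service on serverPort+11 and the dnode-Arbitrator link on serverPort+12. As a purely illustrative sketch of that layout, assuming the default serverPort of 6030 (PortLayout is a hypothetical name, not anything in TDengine):

```java
// Illustrative only: the dnode port layout described in the surrounding text,
// assuming the default serverPort of 6030 (configurable per dnode).
public class PortLayout {
    public static void main(String[] args) {
        int serverPort = 6030; // TDengine default
        System.out.println("native connections (taosc): " + serverPort);
        System.out.println("RESTful service:            " + (serverPort + 11));
        System.out.println("dnode <-> Arbitrator:       " + (serverPort + 12));
        // Firewalls must pass both TCP and UDP on the whole range:
        System.out.printf("open ports %d-%d, TCP and UDP%n", serverPort, serverPort + 12);
    }
}
```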
@@ -41,7 +41,7 @@ TDengine 分布式架构的逻辑结构图如下: - 集群数据节点对外提供 RESTful 服务占用一个 TCP 端口,是 serverPort+11。 - 集群内数据节点与 Arbitrator 节点之间通讯占用一个 TCP 端口,是 serverPort+12。 -因此一个数据节点总的端口范围为 serverPort 到 serverPort+12,总共 13 个 TCP/UDP 端口。使用时,需要确保防火墙将这些端口打开。每个数据节点可以配置不同的 serverPort。详细的端口情况请参见 [TDengine 2.0 端口说明](/train-faq/faq#port) +因此一个数据节点总的端口范围为 serverPort 到 serverPort+12,总共 13 个 TCP/UDP 端口。确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。详细的端口情况请参见 [TDengine 2.0 端口说明](/train-faq/faq#port) **集群对外连接:**TDengine 集群可以容纳单个、多个甚至几千个数据节点。应用只需要向集群中任何一个数据节点发起连接即可,连接需要提供的网络参数是一数据节点的 End Point(FQDN 加配置的端口号)。通过命令行 CLI 启动应用 taos 时,可以通过选项-h 来指定数据节点的 FQDN,-P 来指定其配置的端口号,如果端口不配置,将采用 TDengine 的系统配置参数 serverPort。 @@ -63,7 +63,7 @@ TDengine 分布式架构的逻辑结构图如下: 为解释 vnode、mnode、taosc 和应用之间的关系以及各自扮演的角色,下面对写入数据这个典型操作的流程进行剖析。 -![TDengine典型的操作流程](/img/architecture/message.png) +![TDengine Database 典型的操作流程](./message.webp)
图 2 TDengine 典型的操作流程
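For readers following figure 2 from the application side, a short sketch of the write path, reusing the JDBC URL form and the INSERT statement that appear in the java.mdx changes earlier in this patch. Host and credentials are the documented defaults, not values mandated by the flow:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

// Sketch of the write path in figure 2 as seen by an application:
// taosc resolves which vnode owns table test.t1 and forwards the write;
// the application only sees a single SQL round trip.
public class WriteFlowDemo {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            stmt.executeUpdate(
                "INSERT INTO test.t1 USING test.weather (ts, temperature) " +
                "TAGS('California.SanFrancisco') VALUES(now, 24.6)");
        }
    }
}
```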
@@ -135,7 +135,7 @@ TDengine 除 vnode 分片之外,还对时序数据按照时间段进行分区 Master Vnode 遵循下面的写入流程: -![TDengine Master写入流程](/img/architecture/write_master.png) +![TDengine Database Master写入流程](./write_master.webp)
图 3 TDengine Master 写入流程
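The master-side confirmation rule (with quorum greater than 1, wait for quorum-1 Forward Responses from slaves, bounded by a 2-second timer) is spelled out in the replication design document removed later in this patch. A hedged sketch of just that rule; QuorumWait and ack are illustrative names, not TDengine APIs:

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

// Sketch of the quorum rule from the removed replication design doc:
// quorum == 1 confirms immediately (latch count 0); otherwise the master
// waits for quorum-1 slave acknowledgements or times out after 2 seconds.
public class QuorumWait {
    private final CountDownLatch acks;

    public QuorumWait(int quorum) {
        // the master's own local write counts as the first confirmation
        this.acks = new CountDownLatch(Math.max(0, quorum - 1));
    }

    /** Called when a Forward Response arrives from a slave vnode. */
    public void ack() { acks.countDown(); }

    /** Returns true if the write may be confirmed to the client. */
    public boolean await() throws InterruptedException {
        return acks.await(2, TimeUnit.SECONDS); // timeout -> write reported failed
    }
}
```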
@@ -150,7 +150,7 @@ Master Vnode 遵循下面的写入流程: 对于 slave vnode,写入流程是: -![TDengine Slave 写入流程](/img/architecture/write_slave.png) +![TDengine Database Slave 写入流程](./write_slave.webp)
图 4 TDengine Slave 写入流程
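On the slave side, the replication doc removed later in this patch describes a simple version filter: forwarded or replayed WAL records carry a monotonically increasing version, records newer than the local version are handed to the writeToCache callback, and older ones are dropped. A sketch under those assumptions; the types here (WalRecord, WriteCallback, SlaveApply) are illustrative:

```java
// Sketch of the version rule a slave applies to forwarded/replayed records,
// per the removed replication design: apply only records newer than the
// local version, silently discard older or duplicate ones.
public class SlaveApply {
    interface WriteCallback { void writeToCache(byte[] payload); }
    record WalRecord(long version, byte[] payload) {}

    private long localVersion;
    private final WriteCallback cb;

    public SlaveApply(long startVersion, WriteCallback cb) {
        this.localVersion = startVersion;
        this.cb = cb;
    }

    public void onForward(WalRecord rec) {
        if (rec.version() > localVersion) {   // newer than what we have
            cb.writeToCache(rec.payload());
            localVersion = rec.version();
        }                                     // older/duplicate: discard
    }
}
```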
@@ -284,7 +284,7 @@ SELECT COUNT(*) FROM d1001 WHERE ts >= '2017-7-14 00:00:00' AND ts < '2017-7-14 TDengine 对每个数据采集点单独建表,但在实际应用中经常需要对不同的采集点数据进行聚合。为高效的进行聚合操作,TDengine 引入超级表(STable)的概念。超级表用来代表一特定类型的数据采集点,它是包含多张表的表集合,集合里每张表的模式(schema)完全一致,但每张表都带有自己的静态标签,标签可以有多个,可以随时增加、删除和修改。应用可通过指定标签的过滤条件,对一个 STable 下的全部或部分表进行聚合或统计操作,这样大大简化应用的开发。其具体流程如下图所示: -![多表聚合查询原理图](/img/architecture/multi_tables.png) +![TDengine Database 多表聚合查询原理图](./multi_tables.webp)
图 5 多表聚合查询原理图
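As a concrete companion to figure 5, an aggregation over the `meters` STable (schema as created in the Kafka connector section of this patch) filtered by the `location` tag, so only matching sub-tables are scanned. URL and credentials are the documented defaults:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Aggregate across all sub-tables of the STable `meters` whose `location`
// tag matches; TDengine fans the query out to the relevant vnodes and
// merges the partial results, as figure 5 illustrates.
public class STableAggDemo {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:TAOS://127.0.0.1:6030/test?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(
                 "SELECT AVG(voltage) FROM meters " +
                 "WHERE location = 'California.SanFrancisco'")) {
            while (rs.next()) {
                System.out.println("avg voltage: " + rs.getDouble(1));
            }
        }
    }
}
```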
diff --git a/docs-cn/21-tdinternal/02-replica.md b/docs-cn/21-tdinternal/02-replica.md deleted file mode 100644 index 6a384b982d22956dd514d8df05dc827ca6f8b729..0000000000000000000000000000000000000000 --- a/docs-cn/21-tdinternal/02-replica.md +++ /dev/null @@ -1,256 +0,0 @@ ---- -sidebar_label: 数据复制模块设计 -title: 数据复制模块设计 ---- - -## 数据复制概述 - -数据复制(Replication)是指同一份数据在多个物理地点保存。它的目的是防止数据丢失,提高系统的高可用性(High Availability),而且通过应用访问多个副本,提升数据查询性能。 - -在高可靠的大数据系统里,数据复制是必不可少的一大功能。数据复制又分为实时复制与非实时复制。实时复制是指任何数据的更新(包括数据的增加、删除、修改)操作,会被实时的复制到所有副本,这样任何一台机器宕机或网络出现故障,整个系统还能提供最新的数据,保证系统的正常工作。而非实时复制,是指传统的数据备份操作,按照固定的时间周期,将一份数据全量或增量复制到其他地方。如果主节点宕机,副本是很大可能没有最新数据,因此在有些场景是无法满足要求的。 - -TDengine面向的是物联网场景,需要支持数据的实时复制,来最大程度保证系统的可靠性。实时复制有两种方式,一种是异步复制,一种是同步复制。异步复制(Asynchronous Replication)是指数据由Master转发给Slave后,Master并不需要等待Slave回复确认,这种方式效率高,但有极小的概率会丢失数据。同步复制是指Master将数据转发给Slave后,需要等待Slave的回复确认,才会通知应用写入成功,这种方式效率偏低,但能保证数据绝不丢失。 - -数据复制是与数据存储(写入、读取)密切相关的,但两者又是相对独立,可以完全脱耦的。在TDengine系统中,有两种不同类型的数据,一种是时序数据,由TSDB模块负责;一种是元数据(Meta Data), 由MNODE负责。这两种性质不同的数据都需要同步功能。数据复制模块通过不同的实例启动配置参数,为这两种类型数据都提供同步功能。 - -在阅读本文之前,请先阅读《[TDengine 2.0 整体架构](/tdinternal/arch/)》,了解TDengine的集群设计和基本概念 - -特别注明:本文中提到数据更新操作包括数据的增加、删除与修改。 - -## 基本概念和定义 - -TDengine里存在vnode, mnode, vnode用来存储时序数据,mnode用来存储元数据。但从同步数据复制的模块来看,两者没有本质的区别,因此本文里的虚拟节点不仅包括vnode, 也包括mnode, vgroup也指mnode group, 除非特别注明。 - -**版本(version)**: - -一个虚拟节点组里多个虚拟节点互为备份,来保证数据的有效与可靠,是依靠虚拟节点组的数据版本号来维持的。TDengine2.0设计里,对于版本的定义如下:客户端发起增加、删除、修改的流程,无论是一条记录还是多条,只要是在一个请求里,这个数据更新请求被TDengine的一个虚拟节点收到后,经过合法性检查后,可以被写入系统时,就会被分配一个版本号。这个版本号在一个虚拟节点里从1开始,是单调连续递增的。无论这条记录是采集的时序数据还是meta data, 一样处理。当Master转发一个写入请求到slave时,必须带上版本号。一个虚拟节点将一数据更新请求写入WAL时,需要带上版本号。 - -不同虚拟节点组的数据版本号是完全独立的,互不相干的。版本号本质上是数据更新记录的transaction ID,但用来标识数据集的版本。 - -**角色(role):** - -一个虚拟节点可以是master, slave, unsynced或offline状态。 - -- master: 具有最新的数据,容许客户端往里写入数据,一个虚拟节点组,至多一个master. -- slave:与master是同步的,但不容许客户端往里写入数据,根据配置,可以容许客户端对其进行查询。 -- unsynced: 节点处于非同步状态,比如虚拟节点刚启动、或与其他虚拟节点的连接出现故障等。处于该状态时,该虚拟节点既不能提供写入,也不能提供查询服务。 -- offline: 由于宕机或网络原因,无法访问到某虚拟节点时,其他虚拟节点将该虚拟节点标为离线。但请注意,该虚拟节点本身的状态可能是unsynced或其他,但不会是离线。 - -**Quorum:** - -指数据写入成功所需要的确认数。对于异步复制,quorum设为1,具有master角色的虚拟节点自己确认即可。对于同步复制,需要至少大于等于2。原则上,Quorum >=1 并且 Quorum <= replication(副本数)。这个参数在启动一个同步模块实例时需要提供。 - -**WAL:** - -TDengine的WAL(Write Ahead Log)与cassandra的commit log, mySQL的bin log, Postgres的WAL没本质区别。没有写入数据库文件,还保存在内存的数据都会先存在WAL。当数据已经成功写入数据库数据文件,相应的WAL会被删除。但需要特别指明的是,在TDengine系统里,有几点: - -- 每个虚拟节点有自己独立的wal -- WAL里包含而且仅仅包含来自客户端的数据更新操作,每个更新操作都会被打上一个版本号 - -**复制实例:** - -复制模块只是一可执行的代码,复制实例是指正在运行的复制模块的一个实例,一个节点里,可以存在多个实例。原则上,一个节点有多少虚拟节点,就可以启动多少实例。对于副本数为1的场景,应用可以决定是否需要启动同步实例。应用启动一个同步模块的实例时,需要提供的就是虚拟节点组的配置信息,包括: - -- 虚拟节点个数,即replication number -- 各虚拟节点所在节点的信息,包括node的end point -- quorum, 需要的数据写入成功的确认数 -- 虚拟节点的初始版本号 - -## 数据复制模块的基本工作原理 - -TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性算法比较一致。总结下来,有几点: - -1. 一个vgroup里有一到多个虚拟节点,每个虚拟节点都有自己的角色 -2. 客户端只能向角色是master的虚拟节点发起数据更新操作,因为master具有最新版本的数据,如果向非Master发起数据更新操作,会直接收到错误 -3. 客户端可以向master, 也可以向角色是Slave的虚拟节点发起查询操作,但不能对unsynced的虚拟节点发起任何操作 -4. 如果master不存在,这个vgroup是不能对外提供数据更新和查询服务的 -5. master收到客户端的数据更新操作时,会将其转发给slave节点 -6. 
一个虚拟节点的版本号比master低的时候,会发起数据恢复流程,成功后,才会成为slave - -数据实时复制有三个主要流程:选主、数据转发、数据恢复。后续做详细讨论。 - -## 虚拟节点之间的网络连接 - -虚拟节点之间通过TCP进行连接,节点之间的状态交换、数据包的转发都是通过这个TCP连接(peerFd)进行。为避免竞争,两个虚拟节点之间的TCP连接,总是由IP地址(UINT32)小的节点作为TCP客户端发起。一旦TCP连接被中断,虚拟节点能通过TCP socket自动检测到,将对方标为offline。如果监测到任何错误(比如数据恢复流程),虚拟节点将主动重置该连接。 - -一旦作为客户端的节点连接不成或中断,它将周期性的每隔一秒钟去试图去连接一次。因为TCP本身有心跳机制,虚拟节点之间不再另行提供心跳。 - -如果一个unsynced节点要发起数据恢复流程,它与Master将建立起专有的TCP连接(syncFd)。数据恢复完成后,该连接会被关闭。而且为限制资源的使用,系统只容许一定数量(配置参数tsMaxSyncNum)的数据恢复的socket存在。如果超过这个数字,系统会将新的数据恢复请求延后处理。 - -任意一个节点,无论有多少虚拟节点,都会启动而且只会启动一个TCP server, 来接受来自其他虚拟节点的上述两类TCP的连接请求。当TCP socket建立起来,客户端侧发送的消息体里会带有vgId(全局唯一的vgroup ID), TCP 服务器侧会检查该vgId是否已经在该节点启动运行。如果已经启动运行,就接受其请求。如果不存在,就直接将连接请求关闭。在TDengine代码里,mnode group的vgId设置为1。 - -## 选主流程 - -当同一组的两个虚拟节点之间(vnode A, vnode B)建立连接后,他们互换status消息。status消息里包含本地存储的同一虚拟节点组内所有虚拟节点的role和version。 - -如果一个虚拟节点(vnode A)检测到与同一虚拟节点组内另外一虚拟节点(vnode B)的连接中断,vnode A将立即把vnode B的role设置为offline。无论是接收到另外一虚拟节点发来的status消息,还是检测与另外一虚拟节点的连接中断,该虚拟节点都将进入状态处理流程。状态处理流程的规则如下: - -1. 如果检测到在线的节点数没有超过一半,则将自己的状态设置为unsynced. -2. 如果在线的虚拟节点数超过一半,会检查master节点是否存在,如果存在,则会决定是否将自己状态改为slave或启动数据恢复流程。 -3. 如果master不存在,则会检查自己保存的各虚拟节点的状态信息与从另一节点接收到的是否一致,如果一致,说明节点组里状态已经稳定一致,则会触发选举流程。如果不一致,说明状态还没趋于一致,即使master不存在,也不进行选主。由于要求状态信息一致才进行选举,每个虚拟节点根据同样的信息,会选出同一个虚拟节点做master,无需投票表决。 -4. 自己的状态是根据规则自己决定并修改的,并不需要其他节点同意,包括成为master。一个节点无权修改其他节点的状态。 -5. 如果一个虚拟节点检测到自己或其他虚拟节点的role发生改变,该节点会广播它自己保存的各个虚拟节点的状态信息(role和version)。 - -具体的流程图如下: - -![replica-master.png](/img/architecture/replica-master.png) - -选择Master的具体规则如下: - -1. 如果只有一个副本,该副本永远就是master -2. 所有副本都在线时,版本最高的被选为master -3. 在线的虚拟节点数过半,而且有虚拟节点是slave的话,该虚拟节点自动成为master -4. 对于2和3,如果多个虚拟节点满足成为master的要求,那么虚拟节点组的节点列表里,最前面的选为master - -按照上面的规则,如果所有虚拟节点都是unsynced(比如全部重启),只有所有虚拟节点上线,才能选出master,该虚拟节点组才能开始对外提供服务。当一个虚拟节点的role发生改变时,sync模块回通过回调函数notifyRole通知应用。 - -## 数据转发流程 - -如果vnode A是master, vnode B是slave, vnode A能接受客户端的写请求,而vnode B不能。当vnode A收到写的请求后,遵循下面的流程: - -![replica-forward.png](/img/architecture/replica-forward.png) - -1. 应用对写请求做基本的合法性检查,通过,则给该请求包打上一个版本号(version, 单调递增) -2. 应用将打上版本号的写请求封装一个WAL Head, 写入WAL(Write Ahead Log) -3. 应用调用API syncForwardToPeer,如果vnode B是slave状态,sync模块将包含WAL Head的数据包通过Forward消息发送给vnode B,否则就不转发。 -4. vnode B收到Forward消息后,调用回调函数writeToCache, 交给应用处理 -5. vnode B应用在写入成功后,都需要调用syncConfirmForward通知sync模块已经写入成功。 -6. 如果quorum大于1,vnode B需要等待应用的回复确认,收到确认后,vnode B发送Forward Response消息给node A。 -7. 如果quorum大于1,vnode A需要等待vnode B或其他副本对Forward消息的确认。 -8. 如果quorum大于1,vnode A收到quorum-1条确认消息后,调用回调函数confirmForward,通知应用写入成功。 -9. 如果quorum为1,上述6,7,8步不会发生。 -10. 如果要等待slave的确认,master会启动2秒的定时器(可配置),如果超时,则认为失败。 - -对于回复确认,sync模块提供的是异步回调函数,因此APP在调用syncForwardToPeer之后,无需等待,可以处理下一个操作。在Master与Slave的TCP连接管道里,可能有多个Forward消息,这些消息是严格按照应用提供的顺序排好的。对于Forward Response也是一样,TCP管道里存在多个,但都是排序好的。这个顺序,SYNC模块并没有做特别的事情,是由APP单线程顺序写来保证的(TDengine里每个vnode的写数据,都是单线程)。 - -## 数据恢复流程 - -如果一虚拟节点(vnode B) 处于unsynced状态,master存在(vnode A),而且其版本号比master的低,它将立即启动数据恢复流程。在理解恢复流程时,需要澄清几个关于文件的概念和处理规则。 - -1. 每个文件(无论是archived data的file还是wal)都有一个index, 这需要应用来维护(vnode里,该index就是fileId*3 + 0/1/2, 对应data, head与last三个文件)。如果index为0,表示系统里最老的数据文件。对于mode里的文件,数量是固定的,对应于acct, user, db, table等文件。 -2. 任何一个数据文件(file)有名字、大小,还有一个magic number。只有文件名、大小与magic number一致时,两个文件才判断是一样的,无需同步。Magic number可以是checksum, 也可以是简单的文件大小。怎么计算magic,换句话说,如何检测数据文件是否有效,完全由应用决定。 -3. 文件名的处理有点复杂,因为每台服务器的路径可能不一致。比如node A的TDengine的数据文件存放在 /etc/taos目录下,而node B的数据存放在 /home/jhtao目录下。因此同步模块需要应用在启动一个同步实例时提供一个path,这样两台服务器的绝对路径可以不一样,但仍然可以做对比,做同步。 -4. 
当sync模块调用回调函数getFileInfo获得数据文件信息时,有如下的规则 - * index 为0,表示获取最老的文件,同时修改index返回给sync模块。如果index不为0,表示获取指定位置的文件。 - * 如果name为空,表示sync想获取位于index位置的文件信息,包括magic, size。Master节点会这么调用 - * 如果name不为空,表示sync想获取指定文件名和index的信息,slave节点会这么调用 - * 如果某个index的文件不存在,magic返回0,表示文件已经是最后一个。因此整个系统里,文件的index必须是连续的一段整数。 -5. 当sync模块调用回调函数getWalInfo获得wal信息时,有如下规则 - * index为0,表示获得最老的WAL文件, 返回时,index更新为具体的数字 - * 如果返回0,表示这是最新的一个WAL文件,如果返回值是1,表示后面还有更新的WAL文件 - * 返回的文件名为空,那表示没有WAL文件 -6. 无论是getFileInfo, 还是getWalInfo, 只要获取出错(不是文件不存在),返回-1即可,系统会报错,停止同步 - -整个数据恢复流程分为两大步骤,第一步,先恢复archived data(file), 然后恢复wal。具体流程如下: - -![replica-restore.png](/img/architecture/replica-restore.png) - -1. 通过已经建立的TCP连接,发送sync req给master节点 -2. master收到sync req后,以client的身份,向vnode B主动建立一新的专用于同步的TCP连接(syncFd) -3. 新的TCP连接建立成功后,master将开始retrieve流程,对应的,vnode B将同步启动restore流程 -4. Retrieve/Restore流程里,先处理所有archived data (vnode里的data, head, last文件),后处理WAL data。 -5. 对于archived data,master将通过回调函数getFileInfo获取数据文件的基本信息,包括文件名、magic以及文件大小。 -6. master 将获得的文件名、magic以及文件大小发给vnode B -7. vnode B将回调函数getFile获得magic和文件大小,如果两者一致,就认为无需同步,如果两者不一致 ,就认为需要同步。vnode B将结果通过消息FileAck发回master -8. 如果文件需要同步,master就调用sendfile把整个文件发往vnode B -9. 如果文件不需要同步,master(vnode A)就重复5,6,7,8,直到所有文件被处理完 - -对于WAL同步,流程如下: - -1. master节点调用回调函数getWalInfo,获取WAL的文件名。 -2. 如果getWalInfo返回值大于0,表示该文件还不是最后一个WAL,因此master调用sendfile一下把该文件发送给vnode B -3. 如果getWalInfo返回时为0,表示该文件是最后一个WAL,因为文件可能还处于写的状态中,sync模块要根据WAL Head的定义逐条读出记录,然后发往vnode B。 -4. vnode A读取TCP连接传来的数据,按照WAL Head,逐条读取,如果版本号比现有的大,调用回调函数writeToCache,交给应用处理。如果小,直接扔掉。 -5. 上述流程循环,直到所有WAL文件都被处理完。处理完后,master就会将新来的数据包通过Forward消息转发给slave。 - -从同步文件启动起,sync模块会通过inotify监控所有处理过的file以及wal。一旦发现被处理过的文件有更新变化,同步流程将中止,会重新启动。因为有可能落盘操作正在进行(比如历史数据导入,内存数据落盘),把已经处理过的文件进行了修改,需要重新同步才行。 - -对于最后一个WAL (LastWal)的处理逻辑有点复杂,因为这个文件往往是打开写的状态,有很多场景需要考虑,比如: - -- LastWal文件size在增长,需要重新读; -- LastWal文件虽然已经打开写,但内容为空; -- LastWal文件已经被关闭,应用生成了新的Last WAL文件; -- LastWal文件没有被关闭,但数据落盘的原因,没有读到完整的一条记录; -- LastWal文件没有被关闭,但数据落盘的原因,还有部分记录暂时读取不到; - -sync模块通过inotify监控LastWal文件的更新和关闭操作。而且在确认已经尽可能读完LastWal的数据后,会将对方同步状态设置为SYNC_CACHE。该状态下,master节点会将新的记录转发给vnode B,而此时vnode B并没有完成同步,需要把这些转发包先存在recv buffer里,等WAL处理完后,vnode A再把recv buffer里的数据包通过回调writeToCache交给应用处理。 - -等vnode B把这些buffered forwards处理完,同步流程才算结束,vnode B正式变为slave。 - -## Master分布均匀性问题 - -因为Master负责写、转发,消耗的资源会更多,因此Master在整个集群里分布均匀比较理想。 - -但在TDengine的设计里,如果多个虚拟节点都符合master条件,TDengine选在列表中最前面的做Master, 这样是否导致在集群里,Master数量的分布不均匀问题呢?这取决于应用的设计。 - -给一个具体例子,系统里仅仅有三个节点,IP地址分别为IP1, IP2, IP3. 
在各个节点上,TDengine创建了多个虚拟节点组,每个虚拟节点组都有三个副本。如果三个副本的顺序在所有虚拟节点组里都是IP1, IP2, IP3, 那毫无疑问,master将集中在IP1这个节点,这是我们不想看到的。 - -但是,如果在创建虚拟节点组时,增加随机性,这个问题就不存在了。比如在vgroup 1, 顺序是IP1, IP2, IP3, 在vgroup 2里,顺序是IP2, IP3, IP1, 在vgroup 3里,顺序是IP3, IP1, IP2。最后master的分布会是均匀的。 - -因此在创建一个虚拟节点组时,应用需要保证节点的顺序是round robin或完全随机。 - -## 少数虚拟节点写入成功的问题 - -在某种情况下,写入成功的确认数大于0,但小于配置的Quorum, 虽然有虚拟节点数据更新成功,master仍然会认为数据更新失败,并通知客户端写入失败。 - -这个时候,系统存在数据不一致的问题,因为有的虚拟节点已经写入成功,而有的写入失败。一个处理方式是,Master重置(reset)与其他虚拟节点的连接,该虚拟节点组将自动进入选举流程。按照规则,已经成功写入数据的虚拟节点将成为新的master,组内的其他虚拟节点将从master那里恢复数据。 - -因为写入失败,客户端会重新写入数据。但对于TDengine而言,是OK的。因为时序数据都是有时间戳的,时间戳相同的数据更新操作,第一次会执行,但第二次会自动扔掉。对于Meta Data(增加、删除库、表等等)的操作,也是OK的。一张表、库已经被创建或删除,再创建或删除,不会被执行的。 - -在TDengine的设计里,虚拟节点与虚拟节点之间,是一个TCP连接,是一个pipeline,数据块一个接一个按顺序在这个pipeline里等待处理。一旦某个数据块的处理失败,这个连接会被重置,后续的数据块的处理都会失败。因此不会存在Pipeline里一个数据块更新失败,但下一个数据块成功的可能。 - -## Split Brain的问题 - -选举流程中,有个强制要求,那就是一定有超过半数的虚拟节点在线。但是如果replication正好是偶数,这个时候,完全可能存在splt brain问题。 - -为解决这个问题,TDengine提供Arbitrator的解决方法。Arbitrator是一个节点,它的任务就是接受任何虚拟节点的连接请求,并保持它。 - -在启动复制模块实例时,在配置参数中,应用可以提供Arbitrator的IP地址。如果是奇数个副本,复制模块不会与这个arbitrator去建立连接,但如果是偶数个副本,就会主动去建立连接。 - -Arbitrator的程序tarbitrator.c在复制模块的同一目录, 编译整个系统时,会在bin目录生成。命令行参数“-?”查看可以配置的参数,比如绑定的IP地址,监听的端口号。 - -## 与RAFT相比的异同 - -数据一致性协议流行的有两种,Paxos与Raft. 本设计的实现与Raft有很多类同之处,下面做一些比较 - -相同之处: - -- 三大流程一致:Raft里有Leader election, replication, safety,完全对应TDengine的选举、数据转发、数据恢复三个流程。 -- 节点状态定义一致:Raft里每个节点有Leader, Follower, Candidate三个状态,TDengine里是Master, Slave, Unsynced, Offline。多了一个offlince, 但本质上是一样的,因为offline是外界看一个节点的状态,但该节点本身是处于master, slave 或unsynced的。 -- 数据转发流程完全一样,Master(leader)需要等待回复确认。 -- 数据恢复流程几乎一样,Raft没有涉及历史数据同步问题,只考虑了WAL数据同步。 - -不同之处: - -- 选举流程不一样:Raft里任何一个节点是candidate时,主动向其他节点发出vote request,如果超过半数回答Yes,这个candidate就成为Leader,开始一个新的term。而TDengine的实现里,节点上线、离线或角色改变都会触发状态消息在节点组内传播,等节点组里状态稳定一致之后才触发选举流程,因为状态稳定一致,基于同样的状态信息,每个节点做出的决定会是一致的,一旦某个节点符合成为master的条件,无需其他节点认可,它会自动将自己设为master。TDengine里,任何一个节点检测到其他节点或自己的角色发生改变,就会向节点组内其他节点进行广播。Raft里不存在这样的机制,因此需要投票来解决。 -- 对WAL的一条记录,Raft用term + index来做唯一标识。但TDengine只用version(类似index),在TDengine实现里,仅仅用version是完全可行的, 因为TDengine的选举机制,没有term的概念。 - -如果整个虚拟节点组全部宕机,重启,但不是所有虚拟节点都上线,这个时候TDengine是不会选出master的,因为未上线的节点有可能有最高version的数据。而RAFT协议,只要超过半数上线,就会选出Leader。 - -## Meta Data的数据复制 - -TDengine里存在时序数据,也存在Meta Data。Meta Data对数据的可靠性要求更高,那么TDengine设计能否满足要求呢?下面做个仔细分析。 - -TDengine里Meta Data包括以下: - -- account 信息 -- 一个account下面,可以有多个user, 多个DB -- 一个DB下面有多个vgroup -- 一个DB下面有多个stable -- 一个vgroup下面有多个table -- 整个系统有多个mnode, dnode -- 一个dnode可以有多个vnode - -上述的account, user, DB, vgroup, table, stable, mnode, dnode都有自己的属性,这些属性是TDengine自己定义的,不会开放给用户进行修改。这些Meta Data的查询都比较简单,都可以采用key-value模型进行存储。这些Meta Data还具有几个特点: - -1. 上述的Meta Data之间有一定的层级关系,比如必须先创建DB,才能创建table, stable。只有先创建dnode,才可能创建vnode, 才可能创建vgroup。因此他们创建的顺序是绝对不能错的。 -2. 在客户端应用的数据更新操作得到TDengine服务器侧确认后,所执行的数据更新操作绝对不能丢失。否则会造成客户端应用与服务器的数据不一致。 -3. 
上述的Meta Data是容许重复操作的。比如插入新记录后,再插入一次,删除一次后,再删除一次,更新一次后,再更新一次,不会对系统产生任何影响,不会改变系统任何状态。 - -对于特点1,本设计里,数据的写入是单线程的,按照到达的先后顺序,给每个数据更新操作打上版本号,版本号大的记录一定是晚于版本号小的写入系统,数据写入顺序是100%保证的,绝对不会让版本号大的记录先写入。复制过程中,数据块的转发也是严格按照顺序进行的,因此TDengine的数据复制设计是能保证Meta Data的创建顺序的。 - -对于特点2,只要Quorum数设置等于replica,那么一定能保证回复确认过的数据更新操作不会在服务器侧丢失。即使某节点永不起来,只要超过一半的节点还是online, 查询服务不会受到任何影响。这时,如果某个节点离线超过一定时长,系统可以自动补充新的节点,以保证在线的节点数在绝大部分时间是100%的。 - -对于特点3,完全可能发生,服务器确实持久化存储了某一数据更新操作,但客户端应用出了问题,认为操作不成功,它会重新发起操作。但对于Meta Data而言,没有关系,客户端可以再次发起同样的操作,不会有任何影响。 - -总结来看,只要quorum设置大于一,本数据复制的设计是能满足Meta Data的需求的。目前,还没有发现漏洞。 diff --git a/docs-cn/21-tdinternal/03-taosd.md b/docs-cn/21-tdinternal/03-taosd.md deleted file mode 100644 index 6a5734102c85db291339ce93a2231cb8196053f6..0000000000000000000000000000000000000000 --- a/docs-cn/21-tdinternal/03-taosd.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -sidebar_label: taosd 的设计 -title: taosd的设计 ---- - -逻辑上,TDengine 系统包含 dnode,taosc 和 App,dnode 是服务器侧执行代码 taosd 的一个运行实例,因此 taosd 是 TDengine 的核心,本文对 taosd 的设计做一简单的介绍,模块内的实现细节请见其他文档。 - -## 系统模块图 - -taosd 包含 rpc,dnode,vnode,tsdb,query,cq,sync,wal,mnode,http,monitor 等模块,具体如下图: - -![modules.png](/img/architecture/modules.png) - -taosd 的启动入口是 dnode 模块,dnode 然后启动其他模块,包括可选配置的 http,monitor 模块。taosc 或 dnode 之间交互的消息都是通过 rpc 模块进行,dnode 模块根据接收到的消息类型,将消息分发到 vnode 或 mnode 的消息队列,或由 dnode 模块自己消费。dnode 的工作线程(worker)消费消息队列里的消息,交给 mnode 或 vnode 进行处理。下面对各个模块做简要说明。 - -## RPC 模块 - -该模块负责 taosd 与 taosc,以及其他数据节点之间的通讯。TDengine 没有采取标准的 HTTP 或 gRPC 等第三方工具,而是实现了自己的通讯模块 RPC。 - -考虑到物联网场景下,数据写入的包一般不大,因此除支持 TCP 连接之外,RPC 还支持 UDP 连接。当数据包小于 15K 时,RPC 将采用 UDP 方式进行连接,否则将采用 TCP 连接。对于查询类的消息,RPC 不管包的大小,总是采取 TCP 连接。对于 UDP 连接,RPC 实现了自己的超时、重传、顺序检查等机制,以保证数据可靠传输。 - -RPC 模块还提供数据压缩功能,如果数据包的字节数超过系统配置参数 compressMsgSize,RPC 在传输中将自动压缩数据,以节省带宽。 - -为保证数据的安全和数据的 integrity,RPC 模块采用 MD5 做数字签名,对数据的真实性和完整性进行认证。 - -## DNODE 模块 - -该模块是整个 taosd 的入口,它具体负责如下任务: - -- 系统的初始化,包括 - - 从文件 taos.cfg 读取系统配置参数,从文件 dnodeCfg.json 读取数据节点的配置参数; - - 启动 RPC 模块,并建立起与 taosc 通讯的 server 连接,与其他数据节点通讯的 server 连接; - - 启动并初始化 dnode 的内部管理,该模块将扫描该数据节点已有的 vnode ,并打开它们; - - 初始化可配置的模块,如 mnode,http,monitor 等。 -- 数据节点的管理,包括 - - 定时的向 mnode 发送 status 消息,报告自己的状态; - - 根据 mnode 的指示,创建、改变、删除 vnode; - - 根据 mnode 的指示,修改自己的配置参数; -- 消息的分发、消费,包括 - - 为每一个 vnode 和 mnode 的创建并维护一个读队列、一个写队列; - - 将从 taosc 或其他数据节点来的消息,根据消息类型,将其直接分发到不同的消息队列,或由自己的管理模块直接消费; - - 维护一个读的线程池,消费读队列的消息,交给 vnode 或 mnode 处理。为支持高并发,一个读线程(worker)可以消费多个队列的消息,一个读队列可以由多个 worker 消费; - - 维护一个写的线程池,消费写队列的消息,交给 vnode 或 mnode 处理。为保证写操作的序列化,一个写队列只能由一个写线程负责,但一个写线程可以负责多个写队列。 - -taosd 的消息消费由 dnode 通过读写线程池进行控制,是系统的中枢。该模块内的结构体图如下: - -![dnode.png](/img/architecture/dnode.png) - -## VNODE 模块 - -vnode 是一独立的数据存储查询逻辑单元,但因为一个 vnode 只能容许一个 DB ,因此 vnode 内部没有 account,DB,user 等概念。为实现更好的模块化、封装以及未来的扩展,它有很多子模块,包括负责存储的 TSDB,负责查询的 query,负责数据复制的 sync,负责数据库日志的的 WAL,负责连续查询的 cq(continuous query),负责事件触发的流计算的 event 等模块,这些子模块只与 vnode 模块发生关系,与其他模块没有任何调用关系。模块图如下: - -![vnode.png](/img/architecture/vnode.png) - -vnode 模块向下,与 dnodeVRead,dnodeVWrite 发生互动,向上,与子模块发生互动。它主要的功能有: - -- 协调各个子模块的互动。各个子模块之间都不直接调用,都需要通过 vnode 模块进行; -- 对于来自 taosc 或 mnode 的写操作,vnode 模块将其分解为写日志(WAL),转发(sync),本地存储(TSDB)子模块的操作; -- 对于查询操作,分发到 query 模块进行。 - -一个数据节点里有多个 vnode,因此 vnode 模块是有多个运行实例的。每个运行实例是完全独立的。 - -vnode 与其子模块是通过 API 直接调用,而不是通过消息队列传递。而且各个子模块只与 vnode 模块有交互,不与 dnode,rpc 等模块发生任何直接关联。 - -## MNODE 模块 - -mnode 是整个系统的大脑,负责整个系统的资源调度,负责 meta data 的管理与存储。 - -一个运行的系统里,只有一个 mnode,但它有多个副本(由系统配置参数 numOfMnodes 控制)。这些副本分布在不同的 dnode 里,目的是保证系统的高可靠运行。副本之间的数据复制是采用同步而非异步的方式,以确保数据的一致性,确保数据不会丢失。这些副本会自动选举一个 Master,其他副本是 slave。所有数据更新类的操作,都只能在 master 上进行,而查询类的可以在 slave 节点上进行。代码实现上,同步模块与 
vnode 共享,但 mnode 被分配一个特殊的 vgroup ID: 1,而且 quorum 大于 1。整个集群系统是由多个 dnode 组成的,运行的 mnode 的副本数不可能超过 dnode 的个数,但不会超过配置的副本数。如果某个 mnode 副本宕机一段时间,只要超过半数的 mnode 副本仍在运行,运行的 mnode 会自动根据整个系统的资源情况,在其他 dnode 里再启动一个 mnode,以保证运行的副本数。 - -各个 dnode 通过信息交换,保存有 mnode 各个副本的 End Point 列表,并向其中的 master 节点定时(间隔由系统配置参数 statusInterval 控制)发送 status 消息,消息体里包含该 dnode 的 CPU、内存、剩余存储空间、vnode 个数,以及各个 vnode 的状态(存储空间、原始数据大小、记录条数、角色等)。这样 mnode 就了解整个系统的资源情况,如果用户创建新的表,就可以决定需要在哪个 dnode 创建;如果增加或删除 dnode,或者监测到某 dnode 数据过热、或离线太长,就可以决定需要挪动那些 vnode,以实现负载均衡。 - -mnode 里还负责 account,user,DB,stable,table,vgroup,dnode 的创建、删除与更新。mnode 不仅把这些 entity 的 meta data 保存在内存,还做持久化存储。但为节省内存,各个表的标签值不保存在 mnode(保存在 vnode),而且子表不维护自己的 schema,而是与 stable 共享。为减小 mnode 的查询压力,taosc 会缓存 table、stable 的 schema。对于查询类的操作,各个 slave mnode 也可以提供,以减轻 master 压力。 - -## TSDB 模块 - -TSDB 模块是 vnode 中的负责快速高并发地存储和读取属于该 vnode 的表的元数据及采集的时序数据的引擎。除此之外,TSDB 还提供了表结构的修改、表标签值的修改等功能。TSDB 提供 API 供 vnode 和 query 等模块调用。TSDB 中存储了两类数据,1:元数据信息;2:时序数据 - -### 元数据信息 - -TSDB 中存储的元数据包含属于其所在的 vnode 中表的类型,schema 的定义等。对于超级表和超级表下的子表而言,又包含了 tag 的 schema 定义以及子表的 tag 值等。对于元数据信息而言,TSDB 就相当于一个全内存的 KV 型数据库,属于该 vnode 的表对象全部在内存中,方便快速查询表的信息。除此之外,TSDB 还对其中的子表,按照 tag 的第一列取值做了全内存的索引,大大加快了对于标签的过滤查询。TSDB 中的元数据的最新状态在落盘时,会以追加(append-only)的形式,写入到 meta 文件中。meta 文件只进行追加操作,即便是元数据的删除,也会以一条记录的形式写入到文件末尾。TSDB 也提供了对于元数据的修改操作,如表 schema 的修改,tag schema 的修改以及 tag 值的修改等。 - -### 时序数据 - -每个 TSDB 在创建时,都会事先分配一定量的内存缓冲区,且内存缓冲区的大小可配可修改。表采集的时序数据,在写入 TSDB 时,首先以追加的方式写入到分配的内存缓冲区中,同时建立基于时间戳的内存索引,方便快速查询。当内存缓冲区的数据积累到一定的程度时(达到内存缓冲区总大小的 1/3),则会触发落盘操作,将缓冲区中的数据持久化到硬盘文件上。时序数据在内存缓冲区中是以行(row)的形式存储的。 - -而时序数据在写入到 TSDB 的数据文件时,是以列(column)的形式存储的。TSDB 中的数据文件包含多个数据文件组,每个数据文件组中又包含 .head、.data 和 .last 三个文件,如(v2f1801.head、v2f1801.data、v2f1801.last)数据文件组。TSDB 中的数据文件组是按照时间跨度进行分片的,默认是 10 天一个文件组,且可通过配置文件及建库选项进行配置。分片的数据文件组又按照编号递增排列,方便快速定位某一时间段的时序数据,高效定位数据文件组。时序数据在 TSDB 的数据文件中是以块的形式进行列式存储的,每个块中只包含一张表的数据,且数据在一个块中是按照时间顺序递增排列的。在一个数据文件组中,.head 文件负责存储数据块的索引及统计信息,如每个块的位置,压缩算法,时间戳范围等。存储在 .head 文件中一张表的索引信息是按照数据块中存储的数据的时间递增排列的,方便进行折半查找等工作。.head 和 .last 文件是存储真实数据块的文件,若数据块中的数据累计到一定程度,则会写入 .data 文件中,否则,会写入 .last 文件中,等待下次落盘时合并数据写入 .data 文件中,从而大大减少文件中块的个数,避免数据的过度碎片化。 - -## Query 模块 - -该模块负责整体系统的查询处理。客户端调用该该模块进行 SQL 语法解析,并将查询或写入请求发送到 vnode ,同时负责针对超级表的查询进行二阶段的聚合操作。在 vnode 端,该模块调用 TSDB 模块读取系统中存储的数据进行查询处理。query 模块还定义了系统能够支持的全部查询函数,查询函数的实现机制与查询框架无耦合,可以在不修改查询流程的情况下动态增加查询函数。详细的设计请参见《TDengine 2.0 查询模块设计》。 - -## SYNC 模块 - -该模块实现数据的多副本复制,包括 vnode 与 mnode 的数据复制,支持异步和同步两种复制方式,以满足 meta data 与时序数据不同复制的需求。因为它为 mnode 与 vnode 共享,系统为 mnode 副本预留了一个特殊的 vgroup ID:1。因此 vnode group 的 ID 是从 2 开始的。 - -每个 vnode/mnode 模块实例会有一对应的 sync 模块实例,他们是一一对应的。详细设计请见[TDengine 2.0 数据复制模块设计](/tdinternal/replica/) - -## WAL 模块 - -该模块负责将新插入的数据写入 write ahead log(WAL),为 vnode,mnode 共享。以保证服务器 crash 或其他故障,能从 WAL 中恢复数据。 - -每个 vnode/mnode 模块实例会有一对应的 WAL 模块实例,是完全一一对应的。WAL 的落盘操作由两个参数 walLevel,fsync 控制。看具体场景,如果要 100% 保证数据不会丢失,需要将 walLevel 配置为 2,fsync 设置为 0,每条数据插入请求,都会实时落盘后,才会给应用确认 - -## HTTP 模块 - -该模块负责处理系统对外的 RESTful 接口,可以通过配置,由 dnode 启动或停止 。(仅 2.2 及之前的版本中存在) - -该模块将接收到的 RESTful 请求,做了各种合法性检查后,将其变成标准的 SQL 语句,通过 taosc 的异步接口,将请求发往整个系统中的任一 dnode 。收到处理后的结果后,再翻译成 HTTP 协议,返回给应用。 - -如果 HTTP 模块启动,就意味着启动了一个 taosc 的实例。任一一个 dnode 都可以启动该模块,以实现对 RESTful 请求的分布式处理。 - -## Monitor 模块 - -该模块负责检测一个 dnode 的运行状态,可以通过配置,由 dnode 启动或停止。原则上,每个 dnode 都应该启动一个 monitor 实例。 - -Monitor 采集 TDengine 里的关键操作,比如创建、删除、更新账号、表、库等,而且周期性的收集 CPU、内存、网络等资源的使用情况(采集周期由系统配置参数 monitorInterval 控制)。获得这些数据后,monitor 模块将采集的数据写入系统的日志库(DB 名字由系统配置参数 monitorDbName 控制)。 - -Monitor 模块使用 taosc 来将采集的数据写入系统,因此每个 monitor 实例,都有一个 taosc 运行实例。 diff --git a/docs-cn/21-tdinternal/12-tsz-compress.md 
b/docs-cn/21-tdinternal/12-tsz-compress.md deleted file mode 100644 index baf5df15db3b44edc9e0bd6909e46fa84b676a0b..0000000000000000000000000000000000000000 --- a/docs-cn/21-tdinternal/12-tsz-compress.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: TSZ 压缩算法 ---- - -TSZ 压缩算法是 TDengine 为浮点数据类型提供更加丰富的压缩功能,可以实现浮点数的有损至无损全状态压缩,相比原来在 TDengine 中原有压缩算法,TSZ 压缩算法压缩选项更丰富,压缩率更高,即使切到无损状态下对浮点数压缩,压缩率也会比原来的压缩算法高一倍。 - -## 适合场景 - -TSZ 压缩算法压缩率比原来的要高,但压缩时间会更长,即开启 TSZ 压缩算法写入速度会有一些下降,通常情况下会有 20% 左右的下降。影响写入速度是因为需要更多的 CPU 计算,所以从原始数据到压缩好数据的交付时间变长,导致写入速度变慢。如果您的服务器 CPU 配置很高的话,这个影响会变小甚至没有。 - -另外如果设备产生了大量的高精度浮点数,存储占用的空间非常庞大,但实际使用并不需要那么高的精度时,可以通过 TSZ 压缩的有损压缩功能,把精度压缩至指定的长度,节约存储空间。 - -总结:采集到了大量浮点数,存储时占用空间过大或出有存储空间不足,需要超高压缩率的场景。 - -## 使用步骤 - -- 检查版本支持,2.4.0.10 及之后 TDengine 的版本都支持此功能 - -- 配置选项开启功能,在 TDengine 的配置文件 taos.cfg 增加一行以下内容,打开 TSZ 功能 - -```TSZ -lossyColumns float|double -``` - -- 根据自己需要配置其它选项,如果不配置都会按默认值处理。 - -- 重启服务,配置生效。 -- 确认功能已开启,在服务启动过程中输出的信息如果有前面配置的内容,表明功能已生效: - -```TSZ Test -02/22 10:49:27.607990 00002933 UTL lossyColumns float|double -``` - -## 注意事项 - -- 确认版本是否支持 - -- 除了服务器启动时的输出的配置成功信息外,不再会有其它的信息输出是使用的哪种压缩算法,可以通过配置前后数据库文件大小来比较效果 - -- 如果浮点数类型列较少,看整体数据文件大小效果会不太明显 - -- 此压缩产生的数据文件中浮点数据部分将不能被 2.4.0.10 以下的版本解析,即不向下兼容,使用时避免更换回旧版本,以免数据不能被读取出来。 - -- 在使用过程中允许反复开启和关闭 TSZ 压缩选项的操作,前后两种压缩算法产生的数据都能正常读取。 diff --git a/docs-cn/21-tdinternal/30-iot-big-data.md b/docs-cn/21-tdinternal/30-iot-big-data.md deleted file mode 100644 index a234713f883056e3d1a0dcbfe8e2e47a82865f81..0000000000000000000000000000000000000000 --- a/docs-cn/21-tdinternal/30-iot-big-data.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 物联网大数据 -description: "物联网、工业互联网大数据的特点;物联网大数据平台应具备的功能和特点;通用大数据架构为什么不适合处理物联网数据;物联网、车联网、工业互联网大数据平台,为什么推荐使用 TDengine" ---- - -- [物联网、工业互联网大数据的特点](https://www.taosdata.com/blog/2019/07/09/105.html) -- [物联网大数据平台应具备的功能和特点](https://www.taosdata.com/blog/2019/07/29/542.html) -- [通用大数据架构为什么不适合处理物联网数据?](https://www.taosdata.com/blog/2019/07/09/107.html) -- [物联网、车联网、工业互联网大数据平台,为什么推荐使用 TDengine?](https://www.taosdata.com/blog/2019/07/09/109.html) diff --git a/docs-cn/21-tdinternal/dnode.webp b/docs-cn/21-tdinternal/dnode.webp new file mode 100644 index 0000000000000000000000000000000000000000..a56c7e4594df00a721cb48381d68ca3bc813cdc8 Binary files /dev/null and b/docs-cn/21-tdinternal/dnode.webp differ diff --git a/docs-cn/21-tdinternal/message.webp b/docs-cn/21-tdinternal/message.webp new file mode 100644 index 0000000000000000000000000000000000000000..a2a42abff3d6e932b41a3abe9feae4a5cc13c9e5 Binary files /dev/null and b/docs-cn/21-tdinternal/message.webp differ diff --git a/docs-cn/21-tdinternal/modules.webp b/docs-cn/21-tdinternal/modules.webp new file mode 100644 index 0000000000000000000000000000000000000000..718a6abccdbe40d4a0df5e3812fe0ab943a7c523 Binary files /dev/null and b/docs-cn/21-tdinternal/modules.webp differ diff --git a/docs-cn/21-tdinternal/multi_tables.webp b/docs-cn/21-tdinternal/multi_tables.webp new file mode 100644 index 0000000000000000000000000000000000000000..8f649e34a3a62d1b11b4403b2e743ff6b5e47be2 Binary files /dev/null and b/docs-cn/21-tdinternal/multi_tables.webp differ diff --git a/docs-cn/21-tdinternal/replica-forward.webp b/docs-cn/21-tdinternal/replica-forward.webp new file mode 100644 index 0000000000000000000000000000000000000000..512efd4eba8f23ad0f8607eaaf5525f51ecdcf0e Binary files /dev/null and b/docs-cn/21-tdinternal/replica-forward.webp differ diff --git a/docs-cn/21-tdinternal/replica-master.webp b/docs-cn/21-tdinternal/replica-master.webp new file mode 100644 index 
0000000000000000000000000000000000000000..57030a11f563af2689dbcfd206183f410b121aee Binary files /dev/null and b/docs-cn/21-tdinternal/replica-master.webp differ diff --git a/docs-cn/21-tdinternal/replica-restore.webp b/docs-cn/21-tdinternal/replica-restore.webp new file mode 100644 index 0000000000000000000000000000000000000000..f282c2d4d23f517e3ef08e906cea7e9c5edc0b2a Binary files /dev/null and b/docs-cn/21-tdinternal/replica-restore.webp differ diff --git a/docs-cn/21-tdinternal/structure.webp b/docs-cn/21-tdinternal/structure.webp new file mode 100644 index 0000000000000000000000000000000000000000..b77a42c074b15302b5c3ab889fb550a46dd549b3 Binary files /dev/null and b/docs-cn/21-tdinternal/structure.webp differ diff --git a/docs-cn/21-tdinternal/vnode.webp b/docs-cn/21-tdinternal/vnode.webp new file mode 100644 index 0000000000000000000000000000000000000000..fae3104c89c542c26790b509d12ad56661082c32 Binary files /dev/null and b/docs-cn/21-tdinternal/vnode.webp differ diff --git a/docs-cn/21-tdinternal/write_master.webp b/docs-cn/21-tdinternal/write_master.webp new file mode 100644 index 0000000000000000000000000000000000000000..9624036ed3d46ed60924ead9ce5c61acee0f4652 Binary files /dev/null and b/docs-cn/21-tdinternal/write_master.webp differ diff --git a/docs-cn/21-tdinternal/write_slave.webp b/docs-cn/21-tdinternal/write_slave.webp new file mode 100644 index 0000000000000000000000000000000000000000..7c45dec11b00e6a738de458f9e1bedacfad75a96 Binary files /dev/null and b/docs-cn/21-tdinternal/write_slave.webp differ diff --git a/docs-cn/25-application/01-telegraf.md b/docs-cn/25-application/01-telegraf.md index f63a6701eed2b4c5b98f577d5b2867ae6dada387..95df8699ef85b02d6e9dba398c787644fc9089b2 100644 --- a/docs-cn/25-application/01-telegraf.md +++ b/docs-cn/25-application/01-telegraf.md @@ -16,7 +16,7 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如 本文介绍不需要写一行代码,通过简单修改几行配置文件,就可以快速搭建一个基于 TDengine + Telegraf + Grafana 的 IT 运维系统。架构如下图: -![IT-DevOps-Solutions-Telegraf.png](/img/IT-DevOps-Solutions-Telegraf.png) +![TDengine Database IT-DevOps-Solutions-Telegraf](./IT-DevOps-Solutions-Telegraf.webp) ## 安装步骤 @@ -75,7 +75,7 @@ sudo systemctl start telegraf 点击左侧齿轮图标并选择 `Plugins`,应该可以找到 TDengine data source 插件图标。 点击左侧加号图标并选择 `Import`,从 `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json` 下载 dashboard JSON 文件后导入。之后可以看到如下界面的仪表盘: -![IT-DevOps-Solutions-telegraf-dashboard.png](/img/IT-DevOps-Solutions-telegraf-dashboard.png) +![TDengine Database IT-DevOps-Solutions-telegraf-dashboard](./IT-DevOps-Solutions-telegraf-dashboard.webp) ## 总结 diff --git a/docs-cn/25-application/02-collectd.md b/docs-cn/25-application/02-collectd.md index 5e6bc6577b2f4c8564e4533ced745d0b214ec748..78c61bb969092d7040ddcb3d02ce7bd29a784858 100644 --- a/docs-cn/25-application/02-collectd.md +++ b/docs-cn/25-application/02-collectd.md @@ -16,7 +16,7 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如 本文介绍不需要写一行代码,通过简单修改几行配置文件,就可以快速搭建一个基于 TDengine + collectd / statsD + Grafana 的 IT 运维系统。架构如下图: -![IT-DevOps-Solutions-Collectd-StatsD.png](/img/IT-DevOps-Solutions-Collectd-StatsD.png) +![TDengine Database IT-DevOps-Solutions-Collectd-StatsD](./IT-DevOps-Solutions-Collectd-StatsD.webp) ## 安装步骤 @@ -81,12 +81,12 @@ repeater 部分添加 { host:'', port: -### 18. go 语言编写组件编译失败怎样解决? +### 19. go 语言编写组件编译失败怎样解决? 
TDengine 2.3.0.0 及之后的版本包含一个使用 go 语言开发的 taosAdapter 独立组件,需要单独运行,取代之前 taosd 内置的 httpd ,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD 等)的数据接入功能。 使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosAdapter 仓库代码后再编译。 @@ -184,7 +195,7 @@ go env -w GOPROXY=https://goproxy.cn,direct 如果希望继续使用之前的内置 httpd,可以关闭 taosAdapter 编译,使用 `cmake .. -DBUILD_HTTP=true` 使用原来内置的 httpd。 -### 19. 如何查询数据占用的存储空间大小? +### 20. 如何查询数据占用的存储空间大小? 默认情况下,TDengine 的数据文件存储在 /var/lib/taos ,日志文件存储在 /var/log/taos 。 @@ -193,3 +204,38 @@ go env -w GOPROXY=https://goproxy.cn,direct 若想查看单个数据库占用的大小,可在命令行程序 taos 内指定要查看的数据库后执行 `show vgroups;` ,通过得到的 VGroup id 去 /var/lib/taos/vnode 下查看包含的文件夹大小。 若仅仅想查看指定(超级)表的数据块分布及大小,可查看[_block_dist 函数](https://docs.taosdata.com/taos-sql/select/#_block_dist-%E5%87%BD%E6%95%B0) + +### 21. 客户端连接串如何保证高可用? + +请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2021/04/16/2287.html) + +### 22. 时间戳的时区信息是怎样处理的? + +TDengine 中时间戳的时区总是由客户端进行处理,而与服务端无关。具体来说,客户端会对 SQL 语句中的时间戳进行时区转换,转为 UTC 时区(即 Unix 时间戳——Unix Timestamp)再交由服务端进行写入和查询;在读取数据时,服务端也是采用 UTC 时区提供原始数据,客户端收到后再根据本地设置,把时间戳转换为本地系统所要求的时区进行显示。 + +客户端在处理时间戳字符串时,会采取如下逻辑: + +1. 在未做特殊设置的情况下,客户端默认使用所在操作系统的时区设置。 +2. 如果在 taos.cfg 中设置了 timezone 参数,则客户端会以这个配置文件中的设置为准。 +3. 如果在 C/C++/Java/Python 等各种编程语言的 Connector Driver 中,在建立数据库连接时显式指定了 timezone,那么会以这个指定的时区设置为准。例如 Java Connector 的 JDBC URL 中就有 timezone 参数。 +4. 在书写 SQL 语句时,也可以直接使用 Unix 时间戳(例如 `1554984068000`)或带有时区的时间戳字符串,也即以 RFC 3339 格式(例如 `2013-04-12T15:52:01.123+08:00`)或 ISO-8601 格式(例如 `2013-04-12T15:52:01.123+0800`)来书写时间戳,此时这些时间戳的取值将不再受其他时区设置的影响。 + +### 23. TDengine 2.0 都会用到哪些网络端口? + +使用到的网络端口请看文档:[serverport](/reference/config/#serverport) + +需要注意,文档上列举的端口号都是以默认端口 6030 为前提进行说明,如果修改了配置文件中的设置,那么列举的端口都会随之出现变化,管理员可以参考上述的信息调整防火墙设置。 + +### 24. 为什么 RESTful 接口无响应、Grafana 无法添加 TDengine 为数据源、TDengineGUI 选了 6041 端口还是无法连接成功?? + +taosAdapter 从 TDengine 2.4.0.0 版本开始成为 TDengine 服务端软件的组成部分,是 TDengine 集群和应用程序之间的桥梁和适配器。在此之前 RESTful 接口等功能是由 taosd 内置的 HTTP 服务提供的,而如今要实现上述功能需要执行:```systemctl start taosadapter``` 命令来启动 taosAdapter 服务。 + +需要说明的是,taosAdapter 的日志路径 path 需要单独配置,默认路径是 /var/log/taos ;日志等级 logLevel 有 8 个等级,默认等级是 info ,配置成 panic 可关闭日志输出。请注意操作系统 / 目录的空间大小,可通过命令行参数、环境变量或配置文件来修改配置,默认配置文件是 /etc/taos/taosadapter.toml 。 + +有关 taosAdapter 组件的详细介绍请看文档:[taosAdapter](https://docs.taosdata.com/reference/taosadapter/) + +### 25. 发生了 OOM 怎么办? 
+ +OOM 是操作系统的保护机制,当操作系统内存(包括 SWAP )不足时,会杀掉某些进程,从而保证操作系统的稳定运行。通常内存不足主要是如下两个原因导致,一是剩余内存小于 vm.min_free_kbytes ;二是程序请求的内存大于剩余内存。还有一种情况是内存充足但程序占用了特殊的内存地址,也会触发 OOM 。 + +TDengine 会预先为每个 VNode 分配好内存,每个 Database 的 VNode 个数受 maxVgroupsPerDb 影响,每个 VNode 占用的内存大小受 Blocks 和 Cache 影响。要防止 OOM,需要在项目建设之初合理规划内存,并合理设置 SWAP ,除此之外查询过量的数据也有可能导致内存暴涨,这取决于具体的查询语句。TDengine 企业版对内存管理做了优化,采用了新的内存分配器,对稳定性有更高要求的用户可以考虑选择企业版。 diff --git a/docs-cn/27-train-faq/02-video.mdx b/docs-cn/27-train-faq/02-video.mdx deleted file mode 100644 index b644412332fe817ea7fdc2c9ddc176ecc9858c56..0000000000000000000000000000000000000000 --- a/docs-cn/27-train-faq/02-video.mdx +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: 视频教程 ---- - -## 技术公开课 - -- [技术公开课:开源、高效的物联网大数据平台,TDengine 内核技术剖析](https://www.taosdata.com/blog/2020/12/25/2126.html) - -## 视频教程 - -- [TDengine 视频教程 - 快速上手](https://www.taosdata.com/blog/2020/11/11/1941.html) -- [TDengine 视频教程 - 数据建模](https://www.taosdata.com/blog/2020/11/11/1945.html) -- [TDengine 视频教程 - 集群搭建](https://www.taosdata.com/blog/2020/11/11/1961.html) -- [TDengine 视频教程 - Go Connector](https://www.taosdata.com/blog/2020/11/11/1951.html) -- [TDengine 视频教程 - JDBC Connector](https://www.taosdata.com/blog/2020/11/11/1955.html) -- [TDengine 视频教程 - Node.js Connector](https://www.taosdata.com/blog/2020/11/11/1957.html) -- [TDengine 视频教程 - Python Connector](https://www.taosdata.com/blog/2020/11/11/1963.html) -- [TDengine 视频教程 - RESTful Connector](https://www.taosdata.com/blog/2020/11/11/1965.html) -- [TDengine 视频教程 - “零”代码运维监控](https://www.taosdata.com/blog/2020/11/11/1959.html) - -## 微课堂 - -关注 TDengine 视频号, 有精心制作的微课堂。 - - diff --git a/docs-cn/27-train-faq/03-docker.md b/docs-cn/27-train-faq/03-docker.md index 845a8751846c0995a43fb1c01e6ace3080176838..7791569b25e102b4634f0fb899fc0973cacc0aa1 100644 --- a/docs-cn/27-train-faq/03-docker.md +++ b/docs-cn/27-train-faq/03-docker.md @@ -209,7 +209,7 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0 Press enter key to continue or Ctrl-C to stop ``` - 回车后,该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。 + 回车后,该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "California.SanFrancisco" 或者 "California.SanDieo"。 最后共插入 1 亿条记录。 @@ -279,7 +279,7 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0 $ taos> select groupid, location from test.d0; groupid | location | ================================= - 0 | shanghai | + 0 | California.SanDieo | Query OK, 1 row(s) in set (0.003490s) ``` diff --git a/docs-cn/eco_system.png b/docs-cn/eco_system.png deleted file mode 100644 index bf8bf8f1e0a2311fc12202d712a8a2f9b8ce419b..0000000000000000000000000000000000000000 Binary files a/docs-cn/eco_system.png and /dev/null differ diff --git a/docs-cn/eco_system.webp b/docs-cn/eco_system.webp new file mode 100644 index 0000000000000000000000000000000000000000..d60c38e97c67fa7b2acc703b2ba777d19ae5be13 Binary files /dev/null and b/docs-cn/eco_system.webp differ diff --git a/docs-en/01-index.md b/docs-en/01-index.md index 9574323fe6c9b642cccc053f1f106354105893cb..f5b7f3e0f61507efbb09506b48548c12317e700b 100644 --- a/docs-en/01-index.md +++ 
b/docs-en/01-index.md @@ -4,24 +4,24 @@ sidebar_label: Documentation Home slug: / --- -TDengine is a [high-performance](https://tdengine.com/fast), [scalable](https://tdengine.com/scalable) time series database with [SQL support](https://tdengine.com/sql-support). This document is the TDengine user manual. It introduces the basic concepts, installation, features, SQL, APIs, operation, maintenance, kernel design, etc. It’s written mainly for architects, developers and system administrators. +TDengine is a [high-performance](https://tdengine.com/fast), [scalable](https://tdengine.com/scalable) time series database with [SQL support](https://tdengine.com/sql-support). This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It’s written mainly for architects, developers and system administrators. -To get a global view about TDengine, like feature list, benchmarks, and competitive advantages, please browse through section [Introduction](./intro). +To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section. -TDengine makes full use of the characteristics of time series data, proposes the concepts of "one table for one data collection point" and "super table", and designs an innovative storage engine, which greatly improves the efficiency of data ingestion, querying and storage. To understand the new concepts and use TDengine in the right way, please read [“Concepts”](./concept) thoroughly. +TDengine greatly improves the efficiency of data ingestion, querying and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read [“Concepts”](./concept) thoroughly. -If you are a developer, please read the [“Developer Guide”](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined function, etc. in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, make a few changes to accommodate your application, and it will work. +If you are a developer, please read the [“Developer Guide”](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, make a few changes to accommodate your application, and it will work. -We live in the era of big data, and scale-up is unable to meet the growing business needs. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. The TDengine team has not only developed the cluster feature, they also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster please refer to ["Cluster"](./cluster). 
+We live in the era of big data, and scale-up is unable to meet the growing needs of business. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but it also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster please refer to ["cluster"](./cluster). -TDengine uses SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to support time series data scenarios better, such as roll up, interpolation, time weighted average, etc. The ["SQL Reference"](./taos-sql) chapter describes the SQL syntax in detail, and lists the various supported commands and functions. +TDengine uses ubiquitous SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll up, interpolation and time weighted average, among many others. The ["SQL Reference"](./taos-sql) chapter describes the SQL syntax in detail, and lists the various supported commands and functions. -If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to the ["Administration"](./operation) thoroughly. +If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to, and thoroughly read the ["Administration"](./operation) section. -If you want to know more about TDengine tools, REST API, and connectors for various programming languages, please see the ["Reference"](./reference) chapter. +If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the ["Reference"](./reference) chapter. If you are very interested in the internal design of TDengine, please read the chapter ["Inside TDengine”](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully. -TDengine is an open source database, you are welcome to be a part of TDengine. If you find any errors in the documentation, or the description is not clear, please click "Edit this page" at the bottom of each page to edit it directly. +TDengine is an open source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation, or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly. Together, we make a difference. 
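The SQL extensions called out above (roll up, interpolation, time weighted average) are plain SQL, so they can be exercised from any connector. Below is a minimal C sketch, written for this document rather than taken from the changeset, that runs an INTERVAL downsampling query through the native connector. It assumes a locally running server on the default port 6030, the default root/taosdata credentials, and the demo database `test` that taosBenchmark creates as described elsewhere in this diff.

```c
/* Minimal sketch (not from this changeset): run an INTERVAL downsampling
 * query, one of the time-series SQL extensions, via the native C connector.
 * Assumes a local server, default credentials, and the taosBenchmark demo
 * database "test". */
#include <stdio.h>
#include <taos.h>

int main(void) {
  taos_init();
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn == NULL) {
    fprintf(stderr, "connect failed: %s\n", taos_errstr(NULL));
    return 1;
  }
  /* Downsample the demo data to 1-second buckets. */
  TAOS_RES *res = taos_query(conn,
      "SELECT AVG(current), MAX(voltage) FROM test.meters INTERVAL(1s)");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  } else {
    TAOS_ROW    row;
    int         nfields = taos_num_fields(res);
    TAOS_FIELD *fields  = taos_fetch_fields(res);
    char        line[1024];
    while ((row = taos_fetch_row(res)) != NULL) {
      taos_print_row(line, row, fields, nfields);  /* format one row */
      printf("%s\n", line);
    }
  }
  taos_free_result(res);
  taos_close(conn);
  taos_cleanup();
  return 0;
}
```

The same statement can be issued from the TDengine CLI or over the REST API; the connector only changes how the query is transported, not the SQL itself.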
diff --git a/docs-en/02-intro/eco_system.png b/docs-en/02-intro/eco_system.png deleted file mode 100644 index bf8bf8f1e0a2311fc12202d712a8a2f9b8ce419b..0000000000000000000000000000000000000000 Binary files a/docs-en/02-intro/eco_system.png and /dev/null differ diff --git a/docs-en/02-intro/eco_system.webp b/docs-en/02-intro/eco_system.webp new file mode 100644 index 0000000000000000000000000000000000000000..d60c38e97c67fa7b2acc703b2ba777d19ae5be13 Binary files /dev/null and b/docs-en/02-intro/eco_system.webp differ diff --git a/docs-en/02-intro/index.md b/docs-en/02-intro/index.md index e2309943f3983dcbf7957ef6d478aefa64d7a902..f6766f910f4d7560b782bf02ffa97922523e6167 100644 --- a/docs-en/02-intro/index.md +++ b/docs-en/02-intro/index.md @@ -5,39 +5,39 @@ toc_max_heading_level: 2 TDengine is a high-performance, scalable time-series database with SQL support. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](/develop/cache), [stream processing](/develop/continuous-query), [data subscription](/develop/subscribe) and other functionalities to reduce the complexity and cost of development and operation. -This section introduces the major features, competitive advantages, suited scenarios and benchmarks to help you get a high level picture for TDengine. +This section introduces the major features, competitive advantages, typical use-cases and benchmarks to help you get a high level overview of TDengine. ## Major Features The major features are listed below: -1. Besides [using SQL to insert](/develop/insert-data/sql-writing),it supports [Schemaless writing](/reference/schemaless/),and it supports [InfluxDB LINE](/develop/insert-data/influxdb-line),[OpenTSDB Telnet](/develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](/develop/insert-data/opentsdb-json) and other protocols. -2. Support for seamless integration with third-party data collection agents like [Telegraf](/third-party/telegraf),[Prometheus](/third-party/prometheus),[StatsD](/third-party/statsd),[collectd](/third-party/collectd),[icinga2](/third-party/icinga2), [TCollector](/third-party/tcollector), [EMQX](/third-party/emq-broker), [HiveMQ](/third-party/hive-mq-broker). Without a line of code, those agents can write data points into TDengine just by configuration. -3. Support for [all kinds of queries](/develop/query-data), including aggregation, nested query, downsampling, interpolation, etc. -4. Support for [user defined functions](/develop/udf) +1. While TDengine supports [using SQL to insert](/develop/insert-data/sql-writing), it also supports [Schemaless writing](/reference/schemaless/) just like NoSQL databases. TDengine also supports standard protocols like [InfluxDB LINE](/develop/insert-data/influxdb-line),[OpenTSDB Telnet](/develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](/develop/insert-data/opentsdb-json) among others. +2. TDengine supports seamless integration with third-party data collection agents like [Telegraf](/third-party/telegraf),[Prometheus](/third-party/prometheus),[StatsD](/third-party/statsd),[collectd](/third-party/collectd),[icinga2](/third-party/icinga2), [TCollector](/third-party/tcollector), [EMQX](/third-party/emq-broker), [HiveMQ](/third-party/hive-mq-broker). These agents can write data into TDengine with simple configuration and without a single line of code. +3. Support for [all kinds of queries](/develop/query-data), including aggregation, nested query, downsampling, interpolation and others. +4. 
Support for [user defined functions](/develop/udf). 5. Support for [caching](/develop/cache). TDengine always saves the last data point in cache, so Redis is not needed in some scenarios. 6. Support for [continuous query](/develop/continuous-query). 7. Support for [data subscription](/develop/subscribe) with the capability to specify filter conditions. 8. Support for [cluster](/cluster/), with the capability of increasing processing power by adding more nodes. High availability is supported by replication. -9. Provides interactive [command-line interface](/reference/taos-shell) for management, maintenance and ad-hoc query. +9. Provides an interactive [command-line interface](/reference/taos-shell) for management, maintenance and ad-hoc queries. 10. Provides many ways to [import](/operation/import) and [export](/operation/export) data. -11. Provides [monitoring](/operation/monitor) on TDengine running instances. +11. Provides [monitoring](/operation/monitor) on running instances of TDengine. 12. Provides [connectors](/reference/connector/) for [C/C++](/reference/connector/cpp), [Java](/reference/connector/java), [Python](/reference/connector/python), [Go](/reference/connector/go), [Rust](/reference/connector/rust), [Node.js](/reference/connector/node) and other programming languages. 13. Provides a [REST API](/reference/rest-api/). -14. Supports the seamless integration with [Grafana](/third-party/grafana) for visualization. +14. Supports seamless integration with [Grafana](/third-party/grafana) for visualization. 15. Supports seamless integration with Google Data Studio. -For more detail on features, please read through the whole documentation. +For more details on features, please read through the entire documentation. ## Competitive Advantages -TDengine makes full use of [the characteristics of time series data](https://tdengine.com/2019/07/09/86.html), such as structured, no transaction, rarely delete or update, etc., and builds its own innovative storage engine and computing engine to differentiate itself from other time series databases with the following advantages. +Time-series data is structured, not transactional, and is rarely deleted or updated. TDengine makes full use of [these characteristics of time series data](https://tdengine.com/2019/07/09/86.html) to build its own innovative storage engine and computing engine to differentiate itself from other time series databases, with the following advantages. -- **[High Performance](https://tdengine.com/fast)**: TDengine outperforms other time series databases in data ingestion and querying while significantly reducing storage cost and compute costs, with an innovatively designed and purpose-built storage engine. +- **[High Performance](https://tdengine.com/fast)**: With an innovatively designed and purpose-built storage engine, TDengine outperforms other time series databases in data ingestion and querying while significantly reducing storage costs and compute costs. - **[Scalable](https://tdengine.com/scalable)**: TDengine provides out-of-box scalability and high-availability through its native distributed design. Nodes can be added through simple configuration to achieve greater data processing power. In addition, this feature is open source. -- **[SQL Support](https://tdengine.com/sql-support)**: TDengine uses SQL as the query language, thereby reducing learning and migration costs, while adding SQL extensions to handle time-series data better, and supporting convenient and flexible schemaless data ingestion. 
+- **[SQL Support](https://tdengine.com/sql-support)**: TDengine uses SQL as the query language, thereby reducing learning and migration costs, while adding SQL extensions to better handle time-series. Keeping NoSQL developers in mind, TDengine also supports convenient and flexible, schemaless data ingestion. - **All in One**: TDengine has built-in caching, stream processing and data subscription functions. It is no longer necessary to integrate Kafka/Redis/HBase/Spark or other software in some scenarios. It makes the system architecture much simpler, cost-effective and easier to maintain. @@ -45,24 +45,24 @@ TDengine makes full use of [the characteristics of time series data](https://tde - **Zero Management**: Installation and cluster setup can be done in seconds. Data partitioning and sharding are executed automatically. TDengine’s running status can be monitored via Grafana or other DevOps tools. -- **Zero Learning Costs**: With SQL as the query language and support for ubiquitous tools like Python, Java, C/C++, Go, Rust, and Node.js connectors, there are zero learning costs. +- **Zero Learning Costs**: With SQL as the query language and support for ubiquitous tools like Python, Java, C/C++, Go, Rust, and Node.js connectors, and a REST API, there are zero learning costs. -- **Interactive Console**: TDengine provides convenient console access to the database to run ad hoc queries, maintain the database, or manage the cluster without any programming. +- **Interactive Console**: TDengine provides convenient console access to the database, through a CLI, to run ad hoc queries, maintain the database, or manage the cluster, without any programming. -With TDengine, the total cost of ownership of time-series data platform can be greatly reduced. Because 1: with its superior performance, the computing and storage resources are reduced significantly; 2:with SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly; 3: with its simple architecture and zero management, the operation and maintenance costs are reduced. +With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced. 1: With its superior performance, the computing and storage resources are reduced significantly 2: With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly 3: With its simple architecture and zero management, the operation and maintenance costs are reduced. ## Technical Ecosystem -In the time-series data processing platform, TDengine stands in a role like this diagram below: +This is how TDengine would be situated, in a typical time-series data processing platform: -![TDengine Technical Ecosystem ](eco_system.png) +![TDengine Database Technical Ecosystem ](eco_system.webp)
Figure 1. TDengine Technical Ecosystem
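To make the ingestion side of Figure 1 concrete: agents such as Telegraf emit InfluxDB line-protocol records, and the same records can also be written directly through the C connector's schemaless API. The sketch below is illustrative only and not part of this changeset; it assumes a schemaless-capable client library (roughly the 2.4 line or later), a local server with default credentials, an existing `test` database, and made-up sample values.

```c
/* Illustrative sketch (not from this changeset): write one InfluxDB
 * line-protocol record through the schemaless C API. The measurement,
 * values and timestamp below are made up for the example. */
#include <stdio.h>
#include <taos.h>

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "test", 6030);
  if (conn == NULL) return 1;

  char *lines[] = {
      "meters,location=California.SanFrancisco,groupid=2 "
      "current=11.8,voltage=221i,phase=0.29 1648432611249"};

  /* A super table per measurement is created automatically if missing. */
  TAOS_RES *res = taos_schemaless_insert(conn, lines, 1,
                                         TSDB_SML_LINE_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_MILLI_SECONDS);
  if (taos_errno(res) != 0)
    fprintf(stderr, "schemaless insert failed: %s\n", taos_errstr(res));
  taos_free_result(res);
  taos_close(conn);
  return 0;
}
```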
-On the left side, there are data collection agents like OPC-UA, MQTT, Telegraf and Kafka. On the right side, visualization/BI tools, HMI, Python/R, and IoT Apps can be connected. TDengine itself provides interactive command-line interface and web interface for management and maintenance. +On the left-hand side, there are data collection agents like OPC-UA, MQTT, Telegraf and Kafka. On the right-hand side, visualization/BI tools, HMI, Python/R, and IoT Apps can be connected. TDengine itself provides an interactive command-line interface and a web interface for management and maintenance. -## Suited Scenarios +## Typical Use Cases -As a high-performance, scalable and SQL supported time-series database, TDengine's typical application scenarios include but are not limited to IoT, Industrial Internet, Connected Vehicles, IT operation and maintenance, energy, financial markets and other fields. TDengine is a purpose-built database optimized for the characteristics of time series data, it cannot be used to process data from web crawlers, social media, e-commerce, ERP, CRM, etc. This section makes a more detailed analysis of the applicable scenarios. +As a high-performance, scalable and SQL supported time-series database, TDengine's typical use cases include but are not limited to IoT, Industrial Internet, Connected Vehicles, IT operation and maintenance, energy, financial markets and other fields. TDengine is a purpose-built database optimized for the characteristics of time series data. As such, it cannot be used to process data from web crawlers, social media, e-commerce, ERP, CRM and so on. More generally, TDengine is not a suitable storage engine for non-time-series data. This section makes a more detailed analysis of the applicable scenarios. ### Characteristics and Requirements of Data Sources diff --git a/docs-en/04-concept/index.md b/docs-en/04-concept/index.md index abc553ab6d90042cb2389ba0b71d3b5395dcebfd..850f705146c4829db579f14be1a686ef9052f678 100644 --- a/docs-en/04-concept/index.md +++ b/docs-en/04-concept/index.md @@ -2,7 +2,7 @@ title: Concepts --- -In order to explain the basic concepts and provide some sample code, the TDengine documentation takes smart meters as a typical time series data scenario. Assuming that each smart meter collects three metrics of current, voltage, and phase, there are multiple smart meters, and each meter has static attributes like location and group ID, the collected data will be similar to the following table: +In order to explain the basic concepts and provide some sample code, the TDengine documentation uses smart meters as a typical time series use case. We assume the following: 1. Each smart meter collects three metrics i.e. current, voltage, and phase 2. There are multiple smart meters, and 3. Each meter has static attributes like location and group ID. Based on this, collected data will look similar to the following table:
@@ -29,7 +29,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin - + @@ -38,7 +38,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin - + @@ -47,7 +47,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin - + @@ -56,7 +56,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin - + @@ -65,7 +65,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin - + @@ -74,7 +74,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin - + @@ -83,7 +83,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin - + @@ -92,7 +92,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin - + @@ -112,7 +112,7 @@ Label/Tag refers to the static properties of sensors, equipment or other types o ## Data Collection Point -Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same time stamp. For some complex equipments, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car, so in this example the car would have three data collection points. +Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same time stamp. For some complex equipment, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car. So in this example the car would have three data collection points. ## Table @@ -122,10 +122,10 @@ To make full use of time-series data characteristics, TDengine adopts a strategy 1. Since the metric data from different DCP are fully independent, the data source of each DCP is unique, and a table has only one writer. In this way, data points can be written in a lock-free manner, and the writing speed can be greatly improved. 2. For a DCP, the metric data generated by DCP is ordered by timestamp, so the write operation can be implemented by simple appending, which further greatly improves the data writing speed. -3. The metric data from a DCP is continuously stored in block by block. If you read data for a period of time, it can greatly reduce random read operations and improve read and query performance by orders of magnitude. -4. Inside a data block for a DCP, columnar storage is used, and different compression algorithms are used for different data types. Metrics generally don't vary as significantly between themselves over a time range as compared to other metrics, this allows for a higher compression rate. +3. 
The metric data from a DCP is continuously stored, block by block. If you read data for a period of time, it can greatly reduce random read operations and improve read and query performance by orders of magnitude. +4. Inside a data block for a DCP, columnar storage is used, and different compression algorithms are used for different data types. Metrics generally don't vary as significantly between themselves over a time range as compared to other metrics, which allows for a higher compression rate. -If the metric data of multiple DCPs are traditionally written into a single table, due to the uncontrollable network delay, the timing of the data from different DCPs arriving at the server cannot be guaranteed, the writing operation must be protected by locks, and the metric data from one DCP cannot be guaranteed to be continuously stored together. **One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest extent.** +If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. **One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.** TDengine suggests using DCP ID as the table name (like D1001 in the above table). Each DCP may collect one or multiple metrics (like the current, voltage, phase as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the time stamp as the index, and won’t build the index on any metrics stored. Column wise storage is used. @@ -139,7 +139,7 @@ In the design of TDengine, **a table is used to represent a specific data collec ## Subtable -When creating a table for a specific data collection point, the user can use a STable as a template and specifies the tag values of this specific DCP to create it. **The table created by using a STable as the template is called subtable** in TDengine. The difference between regular table and subtable is: +When creating a table for a specific data collection point, the user can use a STable as a template and specify the tag values of this specific DCP to create it. **The table created by using a STable as the template is called subtable** in TDengine. The difference between regular table and subtable is: 1. Subtable is a table, all SQL commands applied on a regular table can be applied on subtable. 2. Subtable is a table with extensions, it has static tags (labels), and these tags can be added, deleted, and updated after it is created. But a regular table does not have tags. 3. A subtable belongs to only one STable, but a STable may have many subtables. Regular tables do not belong to a STable. @@ -151,7 +151,7 @@ The relationship between a STable and the subtables created based on this STable 2. The schema of metrics or labels cannot be adjusted through subtables, and it can only be changed via STable. Changes to the schema of a STable takes effect immediately for all associated subtables. 3. STable defines only one template and does not store any data or label information by itself. 
Therefore, data cannot be written to a STable, only to subtables. -Queries can be executed on both a table (subtable) and a STable. For a query on a STable, TDengine will treat the data in all its subtables as a whole data set for processing. TDengine will first find the subtables that meet the tag filter conditions, then scan the time-series data of these subtables to perform aggregation operation, which can greatly reduce the data sets to be scanned, thus greatly improving the performance of data aggregation across multiple DCPs. +Queries can be executed on both a table (subtable) and a STable. For a query on a STable, TDengine will treat the data in all its subtables as a whole data set for processing. TDengine will first find the subtables that meet the tag filter conditions, then scan the time-series data of these subtables to perform aggregation operation, which reduces the number of data sets to be scanned which in turn greatly improves the performance of data aggregation across multiple DCPs. In TDengine, it is recommended to use a subtable instead of a regular table for a DCP. @@ -167,4 +167,4 @@ FQDN (Fully Qualified Domain Name) is the full domain name of a specific compute Each node of a TDengine cluster is uniquely identified by an End Point, which consists of an FQDN and a Port, such as h1.tdengine.com:6030. In this way, when the IP changes, we can still use the FQDN to dynamically find the node without changing any configuration of the cluster. In addition, FQDN is used to facilitate unified access to the same cluster from the Intranet and the Internet. -TDengine does not recommend using an IP address to access the cluster, FQDN is recommended for cluster management. +TDengine does not recommend using an IP address to access the cluster. FQDN is recommended for cluster management. diff --git a/docs-en/05-get-started/index.md b/docs-en/05-get-started/index.md index 39b2d02eca3c15aebd5715ee64e455781c8236e5..56958ef3ec1c206ee0cff45c67fd3c3a6fa6753a 100644 --- a/docs-en/05-get-started/index.md +++ b/docs-en/05-get-started/index.md @@ -10,7 +10,7 @@ import AptGetInstall from "./\_apt_get_install.mdx"; ## Quick Install -The full package of TDengine includes the server(taosd), taosAdapter for connecting with third-party systems and providing a RESTful interface, client driver(taosc), command-line program(CLI, taos) and some tools. For the current version, the server taosd and taosAdapter can only be installed and run on Linux systems. In the future taosd and taosAdapter will also be supported on Windows, macOS and other systems. The client driver taosc and TDengine CLI can be installed and run on Windows or Linux. In addition to the connectors of multiple languages, [RESTful interface](/reference/rest-api) is also provided by [taosAdapter](/reference/taosadapter) in TDengine. Prior to version 2.4.0.0, however, there is no taosAdapter, the RESTful interface is provided by the built-in HTTP service of taosd. +The full package of TDengine includes the server(taosd), taosAdapter for connecting with third-party systems and providing a RESTful interface, client driver(taosc), command-line program(CLI, taos) and some tools. For the current version, the server taosd and taosAdapter can only be installed and run on Linux systems. In the future taosd and taosAdapter will also be supported on Windows, macOS and other systems. The client driver taosc and TDengine CLI can be installed and run on Windows or Linux. 
In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](/reference/rest-api) through [taosAdapter](/reference/taosadapter). Prior to version 2.4.0.0, taosAdapter did not exist and the RESTful interface was provided by the built-in HTTP service of taosd. TDengine supports X64/ARM64/MIPS64/Alpha64 hardware platforms, and will support ARM32, RISC-V and other CPU architectures in the future. @@ -130,7 +130,7 @@ After TDengine server is running,execute `taosBenchmark` (previously named tao taosBenchmark ``` -This command will create a super table "meters" under database "test". Under "meters", 10000 tables are created with names from "d0" to "d9999". Each table has 10000 rows and each row has four columns (ts, current, voltage, phase). Time stamp is starting from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table has tags "location" and "groupId". groupId is set 1 to 10 randomly, and location is set to "beijing" or "shanghai". +This command will create a super table "meters" under database "test". Under "meters", 10000 tables are created with names from "d0" to "d9999". Each table has 10000 rows and each row has four columns (ts, current, voltage, phase). Time stamp is starting from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table has tags "location" and "groupId". groupId is set 1 to 10 randomly, and location is set to "California.SanFrancisco" or "California.SanDiego". This command will insert 100 million rows into the database quickly. Time to insert depends on the hardware configuration, it only takes a dozen seconds for a regular PC server. @@ -152,10 +152,10 @@ query the average, maximum, minimum of 100 million rows: taos> select avg(current), max(voltage), min(phase) from test.meters; ``` -query the total number of rows with location="beijing": +query the total number of rows with location="California.SanFrancisco": ```sql -taos> select count(*) from test.meters where location="beijing"; +taos> select count(*) from test.meters where location="California.SanFrancisco"; ``` query the average, maximum, minimum of all rows with groupId=10: diff --git a/docs-en/07-develop/01-connect/index.md b/docs-en/07-develop/01-connect/index.md index ecb8caa308da147adc191b98af9df81c7af1eb0b..b9217b828d0d08c4ff1eacd27406d4e3bfba8eac 100644 --- a/docs-en/07-develop/01-connect/index.md +++ b/docs-en/07-develop/01-connect/index.md @@ -1,7 +1,7 @@ --- -sidebar_label: Connection -title: Connect to TDengine -description: "This document explains how to establish connection to TDengine, and briefly introduce how to install and use TDengine connectors." +sidebar_label: Connect +title: Connect +description: "This document explains how to establish connections to TDengine, and briefly introduces how to install and use TDengine connectors." --- import Tabs from "@theme/Tabs"; @@ -19,25 +19,24 @@ import InstallOnLinux from "../../14-reference/03-connector/\_windows_install.md import VerifyLinux from "../../14-reference/03-connector/\_verify_linux.mdx"; import VerifyWindows from "../../14-reference/03-connector/\_verify_windows.mdx"; -Any application programs running on any kind of platforms can access TDengine through the REST API provided by TDengine. For the details, please refer to [REST API](/reference/rest-api/). Besides, application programs can use the connectors of multiple programming languages to access TDengine, including C/C++, Java, Python, Go, Node.js, C#, and Rust. 
This chapter describes how to establish connection to TDengine and briefly introduce how to install and use connectors. For details about the connectors, please refer to [Connectors](/reference/connector/) +Any application programs running on any kind of platform can access TDengine through the REST API provided by TDengine. For details, please refer to [REST API](/reference/rest-api/). Additionally, application programs can use the connectors of multiple programming languages including C/C++, Java, Python, Go, Node.js, C#, and Rust to access TDengine. This chapter describes how to establish a connection to TDengine and briefly introduces how to install and use connectors. For details about the connectors, please refer to [Connectors](/reference/connector/) ## Establish Connection There are two ways for a connector to establish connections to TDengine: -1. Connection through the REST API provided by taosAdapter component, this way is called "REST connection" hereinafter. +1. Connection through the REST API provided by the taosAdapter component, this way is called "REST connection" hereinafter. 2. Connection through the TDengine client driver (taosc), this way is called "Native connection" hereinafter. -Either way, same or similar APIs are provided by connectors to access database or execute SQL statements, no obvious difference can be observed. - Key differences: -1. With REST connection, it's not necessary to install TDengine client driver (taosc), it's more friendly for cross-platform with the cost of 30% performance downgrade. When taosc has an upgrade, application does not need to make changes. -2. With native connection, full compatibility of TDengine can be utilized, like [Parameter Binding](/reference/connector/cpp#Parameter Binding-api), [Subscription](reference/connector/cpp#Subscription), etc. But taosc has to be installed, some platforms may not be supported. +1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](/reference/connector/cpp#parameter-binding-api), [Subscription](/reference/connector/cpp#subscription-and-consumption-api), etc. +2. The TDengine client driver (taosc) is not supported across all platforms, and applications built on taosc may need to be modified when updating taosc to newer versions. +3. The REST connection is more accessible with cross-platform support; however, it results in a 30% performance downgrade. ## Install Client Driver taosc -If choosing to use native connection and the application is not on the same host as TDengine server, TDengine client driver taosc needs to be installed on the host where the application is. If choosing to use REST connection or the application is on the same host as server side, this step can be skipped. It's better to use same version of taosc as the server. +If you are choosing to use the native connection and the application is not on the same host as TDengine server, the TDengine client driver taosc needs to be installed on the application host. If choosing to use the REST connection or the application is on the same host as TDengine server, this step can be skipped. It's better to use the same version of taosc as the TDengine server. 
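For reference, a native connection in C comes down to a single `taos_connect()` call plus error handling. The sketch below is illustrative rather than part of this changeset, and assumes taosc is installed locally with a server reachable on the default port 6030 using the default credentials.

```c
/* Minimal native-connection sketch (not from this changeset). Assumes
 * taosc is installed and a server listens on the default port. */
#include <stdio.h>
#include <taos.h>

int main(void) {
  taos_init();
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn == NULL) {
    fprintf(stderr, "connect failed: %s\n", taos_errstr(NULL));
    taos_cleanup();
    return 1;
  }
  printf("connected, server: %s\n", taos_get_server_info(conn));
  taos_close(conn);
  taos_cleanup();
  return 0;
}
```

A REST connection, by contrast, needs no taosc at all; the application simply sends HTTP requests to taosAdapter, which is what makes it the more portable, if slower, option described above.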
### Install diff --git a/docs-en/07-develop/02-model/index.mdx b/docs-en/07-develop/02-model/index.mdx index 962a75338f0384ee8facb4682342e25e536e4ecb..86853aaaa3f7285fe042a892e2ec903d57894111 100644 --- a/docs-en/07-develop/02-model/index.mdx +++ b/docs-en/07-develop/02-model/index.mdx @@ -2,19 +2,26 @@ title: Data Model --- -The data model employed by TDengine is similar to relational database, you need to create databases and tables. For a specific application, the design of databases, STables (abbreviated for super table), and tables need to be considered. This chapter will explain the big picture without syntax details. +The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the STable (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details. ## Create Database -The characteristics of data from different data collection points may be different, such as collection frequency, days to keep, number of replicas, data block size, whether it's allowed to update data, etc. For TDengine to operate with the best performance, it's strongly suggested to put the data with different characteristics into different databases because different storage policy can be set for each database. When creating a database, there are a lot of parameters that can be configured, such as the days to keep data, the number of replicas, the number of memory blocks, time precision, the minimum and maximum number of rows in each data block, compress or not, the time range of the data in single data file, etc. Below is an example of the SQL statement for creating a database. +The [characteristics of time-series data](https://www.taosdata.com/blog/2019/07/09/86.html) from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For example, days to keep, number of replicas, data block size, whether data updates are allowed and other configurable parameters would be determined by the characteristics of your data and your business requirements. For TDengine to operate with the best performance, we strongly recommend that you create and configure different databases for data with different characteristics. This allows you, for example, to set up different storage and retention policies. When creating a database, there are a lot of parameters that can be configured, such as the days to keep data, the number of replicas, the number of memory blocks, time precision, the minimum and maximum number of rows in each data block, whether compression is enabled, the time range of the data in a single data file and so on. Below is an example of the SQL statement to create a database. ```sql CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 6 UPDATE 1; ``` -In the above SQL statement, a database named "power" will be created, the data in it will be kept for 365 days, which means the data older than 365 days will be deleted automatically, a new data file will be created every 10 days, the number of memory blocks is 6, data is allowed to be updated. For more details please refer to [Database](/taos-sql/database).
+In the above SQL statement: +- a database named "power" will be created +- the data in it will be kept for 365 days, which means that data older than 365 days will be deleted automatically +- a new data file will be created every 10 days +- the number of memory blocks is 6 +- data is allowed to be updated -After creating a database, the current database in use can be switched using SQL command `USE`, for example below SQL statement switches the current database to `power`. Without current database specified, table name must be preceded with the corresponding database name. +For more details please refer to [Database](/taos-sql/database). + +After creating a database, the current database in use can be switched using SQL command `USE`. For example, the SQL statement below switches the current database to `power`. Without the current database specified, the table name must be preceded by the corresponding database name. ```sql USE power; @@ -23,14 +30,14 @@ USE power; :::note - Any table or STable must belong to a database. To create a table or STable, the database it belongs to must be ready. -- JOIN operation can't be performed tables from two different databases. +- JOIN operations can't be performed on tables from two different databases. - Timestamp needs to be specified when inserting rows or querying historical rows. ::: ## Create STable -In a time-series application, there may be multiple kinds of data collection points. For example, in the electrical power system there are meters, transformers, bus bars, switches, etc. For easy and efficient aggregation of multiple tables, one STable needs to be created for each kind of data collection point. For example, for the meters in [table 1](/tdinternal/arch#model_table1), below SQL statement can be used to create the super table. +In a time-series application, there may be multiple kinds of data collection points. For example, in the electrical power system there are meters, transformers, bus bars, switches, etc. For easy and efficient aggregation of multiple tables, one STable needs to be created for each kind of data collection point. For example, for the meters in [table 1](/tdinternal/arch#model_table1), the SQL statement below can be used to create the super table. ```sql CREATE STable meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int); @@ -41,44 +48,46 @@ If you are using versions prior to 2.0.15, the `STable` keyword needs to be repl ::: -Similar to creating a regular table, when creating a STable, name and schema need to be provided too. In the STable schema, the first column must be timestamp (like ts in the example), and other columns (like current, voltage and phase in the example) are the data collected. The type of a column can be integer, float, double, string ,etc. Besides, the schema for tags need to be provided, like location and groupId in the example. The type of a tag can be integer, float, string, etc. The static properties of a data collection point can be defined as tags, like the location, device type, device group ID, manager ID, etc. Tags in the schema can be added, removed or updated. Please refer to [STable](/taos-sql/stable) for more details. +Similar to creating a regular table, when creating a STable, the name and schema need to be provided. In the STable schema, the first column must always be a timestamp (like ts in the example), and the other columns (like current, voltage and phase in the example) are the data collected.
The remaining columns can [contain data of type](/taos-sql/data-type/) integer, float, double, string, etc. In addition, the schema for tags, like location and groupId in the example, must be provided. The tag type can be integer, float, string, etc. Tags are essentially the static properties of a data collection point. For example, properties like the location, device type, device group ID, manager ID are tags. Tags in the schema can be added, removed or updated. Please refer to [STable](/taos-sql/stable) for more details. -For each kind of data collection points, a corresponding STable must be created. There may be many STables in an application. For electrical power system, we need to create a STable respectively for meters, transformers, busbars, switches. There may be multiple kinds of data collection points on a single device, for example there may be one data collection point for electrical data like current and voltage and another point for environmental data like temperature, humidity and wind direction, multiple STables are required for such kind of device. +For each kind of data collection point, a corresponding STable must be created. There may be many STables in an application. For the electrical power system, we need to create STables for meters, transformers, busbars, and switches respectively. There may be multiple kinds of data collection points on a single device, for example there may be one data collection point for electrical data like current and voltage and another data collection point for environmental data like temperature, humidity and wind direction. Multiple STables are required for these kinds of devices. -At most 4096 (or 1024 prior to version 2.1.7.0) columns are allowed in a STable. If there are more than 4096 of metrics to bo collected for a data collection point, multiple STables are required for such kind of data collection point. There can be multiple databases in system, while one or more STables can exist in a database. +At most 4096 (or 1024 prior to version 2.1.7.0) columns are allowed in a STable. If there are more than 4096 metrics to be collected for a data collection point, multiple STables are required. There can be multiple databases in a system, while one or more STables can exist in a database. ## Create Table -A specific table needs to be created for each data collection point. Similar to RDBMS, table name and schema are required to create a table. Beside, one or more tags can be created for each table. To create a table, a STable needs to be used as template and the values need to be specified for the tags. For example, for the meters in [Table 1](/tdinternal/arch#model_table1), the table can be created using below SQL statement. +A specific table needs to be created for each data collection point. Similar to RDBMS, table name and schema are required to create a table. Additionally, one or more tags can be created for each table. To create a table, a STable needs to be used as a template and the values need to be specified for the tags. For example, for the meters in [Table 1](/tdinternal/arch#model_table1), the table can be created using the SQL statement below. ```sql -CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2); +CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2); ``` -In the above SQL statement, "d1001" is the table name, "meters" is the STable name, followed by the value of tag "Location" and the value of tag "groupId", which are "Beijing.Chaoyang" and "2" respectively in the example.
The tag values can be updated after the table is created. Please refer to [Tables](/taos-sql/table) for details. +In the above SQL statement, "d1001" is the table name, "meters" is the STable name, followed by the value of tag "Location" and the value of tag "groupId", which are "California.SanFrancisco" and "2" respectively in the example. The tag values can be updated after the table is created. Please refer to [Tables](/taos-sql/table) for details. -In TDengine system, it's recommended to create a table for a data collection point via STable. Table created via STable is called subtable in some parts of TDengine document. All SQL commands applied on regular table can be applied on subtable. +In the TDengine system, it's recommended to create a table for a data collection point via STable. A table created via STable is called a subtable in some parts of the TDengine documentation. All SQL commands applied on regular tables can be applied on subtables. :::warning It's not recommended to create a table in a database while using a STable from another database as template. :::tip -It's suggested to use the global unique ID of a data collection point as the table name, for example the device serial number. If there isn't such a unique ID, multiple IDs that are not global unique can be combined to form a global unique ID. It's not recommended to use a global unique ID as tag value. +It's suggested to use the globally unique ID of a data collection point as the table name. For example, the device serial number could be used as a unique ID. If a unique ID doesn't exist, multiple IDs that are not globally unique can be combined to form a globally unique ID. It's not recommended to use a globally unique ID as a tag value. ## Create Table Automatically -In some circumstances, it's not sure whether the table already exists when inserting rows. The table can be created automatically using the SQL statement below, and nothing will happen if the table already exist. +In some circumstances, it's unknown whether the table already exists when inserting rows. The table can be created automatically using the SQL statement below, and nothing will happen if the table already exists. ```sql -INSERT INTO d1001 USING meters TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32); +INSERT INTO d1001 USING meters TAGS ("California.SanFrancisco", 2) VALUES (now, 10.2, 219, 0.32); ``` -In the above SQL statement, a row with value `(now, 10.2, 219, 0.32)` will be inserted into table "d1001". If table "d1001" doesn't exist, it will be created automatically using STable "meters" as template with tag value `"Beijing.Chaoyang", 2`. +In the above SQL statement, a row with value `(now, 10.2, 219, 0.32)` will be inserted into table "d1001". If table "d1001" doesn't exist, it will be created automatically using STable "meters" as a template with tag value `"California.SanFrancisco", 2`. For more details please refer to [Create Table Automatically](/taos-sql/insert#automatically-create-table-when-inserting). ## Single Column vs Multiple Column -Multiple columns data model is supported in TDengine. As long as multiple metrics are collected by same data collection point at same time, i.e. the timestamp are identical, these metrics can be put in single stable as columns. However, there is another kind of design, i.e. single column data model, a table is created for each metric, which means a STable is required for each kind of metric. For example, 3 STables are required for current, voltage and phase.
+A multiple column data model is supported in TDengine. As long as multiple metrics are collected by the same data collection point at the same time, i.e. the timestamps are identical, these metrics can be put in a single STable as columns. + +However, there is another kind of design, i.e. a single column data model in which a table is created for each metric. This means that a STable is required for each kind of metric. For example in a single column model, 3 STables would be required for current, voltage and phase. -It's recommended to use multiple column data model as much as possible because it's better in the performance of inserting or querying rows. In some cases, however, the metrics to be collected vary frequently and correspondingly the STable schema needs to be changed frequently too. In such case, it's more convenient to use single column data model. +It's recommended to use a multiple column data model as much as possible because insert and query performance is higher. In some cases, however, the collected metrics may vary frequently and so the corresponding STable schema needs to be changed frequently too. In such cases, it's more convenient to use a single column data model. diff --git a/docs-en/07-develop/03-insert-data/01-sql-writing.mdx b/docs-en/07-develop/03-insert-data/01-sql-writing.mdx index 9f66992d3de755389c3a0722ebb09097177742f1..397b1a14fd76c1372c79eb88575f2bf21cb62050 100644 --- a/docs-en/07-develop/03-insert-data/01-sql-writing.mdx +++ b/docs-en/07-develop/03-insert-data/01-sql-writing.mdx @@ -1,5 +1,5 @@ --- -sidebar_label: SQL +sidebar_label: Insert Using SQL title: Insert Using SQL --- @@ -22,11 +22,11 @@ import CStmt from "./_c_stmt.mdx"; ## Introduction -Application program can execute `INSERT` statement through connectors to insert rows. TAOS CLI can be launched manually to insert data too. +Application programs can execute `INSERT` statements through connectors to insert rows. The TAOS CLI can also be used to manually insert data. ### Insert Single Row -Below SQL statement is used to insert one row into table "d1001". +The below SQL statement is used to insert one row into table "d1001". ```sql INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31); @@ -34,7 +34,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31); ### Insert Multiple Rows -Multiple rows can be inserted in single SQL statement. Below example inserts 2 rows into table "d1001". +Multiple rows can be inserted in a single SQL statement. The example below inserts 2 rows into table "d1001". ```sql INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, 218, 0.25); @@ -42,7 +42,7 @@ INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, ### Insert into Multiple Tables -Data can be inserted into multiple tables in same SQL statement. Below example inserts 2 rows into table "d1001" and 1 row into table "d1002". +Data can be inserted into multiple tables in the same SQL statement. The example below inserts 2 rows into table "d1001" and 1 row into table "d1002". ```sql INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31); @@ -52,14 +52,14 @@ For more details about `INSERT` please refer to [INSERT](/taos-sql/insert). :::info -- Inserting in batch can gain better performance. Normally, the higher the batch size, the better the performance. Please be noted each single row can't exceed 16K bytes and each single SQL statement can't exceed 1M bytes.
-- Inserting with multiple threads can gain better performance too. However, depending on the system resources on the application side and the server side, with the number of inserting threads grows to a specific point, the performance may drop instead of growing. The proper number of threads need to be tested in a specific environment to find the best number. +- Inserting in batches can improve performance. Normally, the higher the batch size, the better the performance. Please note that a single row can't exceed 48K bytes and each SQL statement can't exceed 1MB. +- Inserting with multiple threads can also improve performance. However, depending on the system resources on the application side and the server side, when the number of inserting threads grows beyond a specific point the performance may drop instead of improving. The proper number of threads needs to be tested in a specific environment to find the best number. ::: :::warning -- If the timestamp for the row to be inserted already exists in the table, the behavior depends on the value of parameter `UPDATE`. If it's set to 0 (also the default value), the row will be discarded. If it's set to 1, the new values will override the old values for the same row. +- If the timestamp for the row to be inserted already exists in the table, the behavior depends on the value of parameter `UPDATE`. If it's set to 0 (the default value), the row will be discarded. If it's set to 1, the new values will override the old values for the same row. - The timestamp to be inserted must be newer than the timestamp of subtracting current time by the parameter `KEEP`. If `KEEP` is set to 3650 days, then the data older than 3650 days ago can't be inserted. The timestamp to be inserted can't be newer than the timestamp of current time plus parameter `DAYS`. If `DAYS` is set to 2, the data newer than 2 days later can't be inserted. ::: @@ -95,13 +95,13 @@ For more details about `INSERT` please refer to [INSERT](/taos-sql/insert). :::note 1. With either native connection or REST connection, the above samples can work well. -2. Please be noted that `use db` can't be used with REST connection because REST connection is stateless, so in the samples `dbName.tbName` is used to specify the table name. +2. Please note that `use db` can't be used with a REST connection because REST connections are stateless, so in the samples `dbName.tbName` is used to specify the table name. ::: ### Insert with Parameter Binding -TDengine also provides Prepare API that support parameter binding. Similar to MySQL, only `?` can be used in these APIs to represent the parameters to bind. From version 2.1.1.0 and 2.1.2.0, parameter binding support for inserting data has been improved significantly to improve the insert performance by avoiding the cost of parsing SQL statements. +TDengine also provides API support for parameter binding. Similar to MySQL, only `?` can be used in these APIs to represent the parameters to bind. From versions 2.1.1.0 and 2.1.2.0, parameter binding support for inserting data has been improved significantly, increasing insert performance by avoiding the cost of parsing SQL statements. Parameter binding is available only with the native connection.
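To make the binding workflow concrete, the sketch below shows the kind of parameterized statement the prepare APIs accept, reusing table d1001 from the examples above. Each `?` is a placeholder that the connector's bind step fills with a typed value; the statement text itself is parsed only once:

```sql
-- Template handed to the prepare step; each ? marks a parameter to bind.
-- For table d1001 the four placeholders correspond to the columns
-- (ts, current, voltage, phase) in order.
INSERT INTO d1001 VALUES (?, ?, ?, ?);
```

Because the statement is prepared once and then executed repeatedly with different bound values, the per-row parsing cost described above is avoided.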
diff --git a/docs-en/07-develop/03-insert-data/02-influxdb-line.mdx b/docs-en/07-develop/03-insert-data/02-influxdb-line.mdx index 172003d203fa309ce51b3ecae9a7490a59f513d7..be46ebf0c97a29b57c1b57eb8ea5c9394f85b93a 100644 --- a/docs-en/07-develop/03-insert-data/02-influxdb-line.mdx +++ b/docs-en/07-develop/03-insert-data/02-influxdb-line.mdx @@ -15,13 +15,13 @@ import CLine from "./_c_line.mdx"; ## Introduction -A single line of text is used in InfluxDB Line protocol format represents one row of data, each line contains 4 parts as shown below. +In the InfluxDB Line protocol format, a single line of text is used to represent one row of data. Each line contains 4 parts as shown below. ``` measurement,tag_set field_set timestamp ``` -- `measurement` will be used as the STable name +- `measurement` will be used as the name of the STable - `tag_set` will be used as tags, with format like `=,=` - `field_set`will be used as data columns, with format like `=,=` - `timestamp` is the primary key timestamp corresponding to this row of data @@ -29,13 +29,13 @@ measurement,tag_set field_set timestamp For example: ``` -meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500 +meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500 ``` :::note -- All the data in `tag_set` will be converted to ncahr type automatically . -- Each data in `field_set` must be self-description for its data type. For example 1.2f32 means a value 1.2 of float type, it will be treated as double without the "f" type suffix. +- All the data in `tag_set` will be converted to nchar type automatically. +- Each data in `field_set` must be self-descriptive for its data type. For example 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double. - Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h). ::: diff --git a/docs-en/07-develop/03-insert-data/03-opentsdb-telnet.mdx b/docs-en/07-develop/03-insert-data/03-opentsdb-telnet.mdx index 66bb67c25669b906183526377f60b969ea3d1e85..18a695cda8efbef075451ff53e542d9e69c58e0b 100644 --- a/docs-en/07-develop/03-insert-data/03-opentsdb-telnet.mdx +++ b/docs-en/07-develop/03-insert-data/03-opentsdb-telnet.mdx @@ -15,21 +15,21 @@ import CTelnet from "./_c_opts_telnet.mdx"; ## Introduction -A single line of text is used in OpenTSDB line protocol to represent one row of data. OpenTSDB employs single column data model, so one line can only contains single data column. There can be multiple tags. Each line contains 4 parts as below: +A single line of text is used in OpenTSDB line protocol to represent one row of data. OpenTSDB employs a single column data model, so each line can only contain a single data column. There can be multiple tags. Each line contains 4 parts as below: ``` =[ =] ``` -- `metric` will be used as STable name. -- `timestamp` is the timestamp of current row of data. The time precision will be determined automatically based on the length of the timestamp. second and millisecond time precision are supported.\ +- `metric` will be used as the STable name. +- `timestamp` is the timestamp of the current row of data. The time precision will be determined automatically based on the length of the timestamp. Second and millisecond time precision are supported. - `value` is a metric which must be a numeric value, the corresponding column name is "value".
-- The last part is tag sets separated by space, all tags will be converted to nchar type automatically. +- The last part is the tag set separated by spaces; all tags will be converted to nchar type automatically. For example: ```txt -meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3 +meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3 ``` Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details. @@ -60,7 +60,7 @@ Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_te -2 STables will be crated automatically while each STable has 4 rows of data in the above sample code. +2 STables will be created automatically and each STable has 4 rows of data in the above sample code. ```cmd taos> use test; @@ -76,9 +76,9 @@ Query OK, 2 row(s) in set (0.002544s) taos> select tbname, * from `meters.current`; tbname | ts | value | groupid | location | ================================================================================================================================== - t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | Beijing.Haidian | - t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | Beijing.Haidian | - t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.249 | 10.300000000 | 2 | Beijing.Chaoyang | - t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | Beijing.Chaoyang | + t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | California.LosAngeles | + t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | California.LosAngeles | + t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.249 | 10.300000000 | 2 | California.SanFrancisco | + t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | California.SanFrancisco | Query OK, 4 row(s) in set (0.005399s) ``` diff --git a/docs-en/07-develop/03-insert-data/04-opentsdb-json.mdx b/docs-en/07-develop/03-insert-data/04-opentsdb-json.mdx index d4f723dcdeb78c54ba31fd4f6aa2528a90376c5f..3a239440311c736159d6060db5e730c5e5665bcb 100644 --- a/docs-en/07-develop/03-insert-data/04-opentsdb-json.mdx +++ b/docs-en/07-develop/03-insert-data/04-opentsdb-json.mdx @@ -47,7 +47,7 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http :::note - In JSON protocol, strings will be converted to nchar type and numeric values will be converted to double type. -- Only data in array format is accepted, array must be used even there is only one row. +- Only data in array format is accepted and so an array must be used even if there is only one row.
::: @@ -93,7 +93,7 @@ Query OK, 2 row(s) in set (0.001954s) taos> select * from `meters.current`; ts | value | groupid | location | =================================================================================================================== - 2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | Beijing.Chaoyang | - 2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | Beijing.Chaoyang | + 2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | California.SanFrancisco | + 2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco | Query OK, 2 row(s) in set (0.004076s) ``` diff --git a/docs-en/07-develop/03-insert-data/index.md b/docs-en/07-develop/03-insert-data/index.md index ee80d436f11f19b422df261845f1c209620251f2..1a71e719a56448e4b535632e570ce8a04d2282bb 100644 --- a/docs-en/07-develop/03-insert-data/index.md +++ b/docs-en/07-develop/03-insert-data/index.md @@ -1,12 +1,12 @@ --- -title: Insert +title: Insert Data --- -TDengine supports multiple protocols of inserting data, including SQL, InfluxDB Line protocol, OpenTSDB Telnet protocol, OpenTSDB JSON protocol. Data can be inserted row by row, or in batch. Data from one or more collecting points can be inserted simultaneously. In the meantime, data can be inserted with multiple threads, out of order data and historical data can be inserted too. InfluxDB Line protocol, OpenTSDB Telnet protocol and OpenTSDB JSON protocol are the 3 kinds of schemaless insert protocols supported by TDengine. It's not necessary to create stable and table in advance if using schemaless protocols, and the schemas can be adjusted automatically according to the data to be inserted. +TDengine supports multiple protocols of inserting data, including SQL, InfluxDB Line protocol, OpenTSDB Telnet protocol, and OpenTSDB JSON protocol. Data can be inserted row by row, or in batches. Data from one or more collection points can be inserted simultaneously. Data can be inserted with multiple threads, and out of order data and historical data can be inserted as well. InfluxDB Line protocol, OpenTSDB Telnet protocol and OpenTSDB JSON protocol are the 3 kinds of schemaless insert protocols supported by TDengine. It's not necessary to create STables and tables in advance if using schemaless protocols, and the schemas can be adjusted automatically based on the data being inserted. ```mdx-code-block import DocCardList from '@theme/DocCardList'; import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; -``` \ No newline at end of file +``` diff --git a/docs-en/07-develop/04-query-data/_category_.yml b/docs-en/07-develop/04-query-data/_category_.yml index 5912a48fc31ed36235c0d34d8b0909bf3b518aaa..809db34621a63505ceace7ba182e07c698bdbddb 100644 --- a/docs-en/07-develop/04-query-data/_category_.yml +++ b/docs-en/07-develop/04-query-data/_category_.yml @@ -1 +1 @@ -label: Select Data +label: Query Data diff --git a/docs-en/07-develop/04-query-data/index.mdx b/docs-en/07-develop/04-query-data/index.mdx index 4016f8453ba9e0679a2798b92cd40efcb926343b..a212fa9529215fc24c55c95a166cfc1a407359b2 100644 --- a/docs-en/07-develop/04-query-data/index.mdx +++ b/docs-en/07-develop/04-query-data/index.mdx @@ -1,6 +1,6 @@ --- -Sidebar_label: Select -title: Select +Sidebar_label: Query data +title: Query data description: "This chapter introduces major query functionalities and how to perform sync and async query using connectors." --- @@ -20,7 +20,7 @@ import CAsync from "./_c_async.mdx"; ## Introduction -SQL is used by TDengine as the query language. 
Application programs can send SQL statements to TDengine through REST API or connectors. TDengine CLI `taos` can also be used to execute SQL Ad-Hoc query. Here is the list of major query functionalities supported by TDengine: +SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine: - Query on single column or multiple columns - Filter on tags or data columns:>, <, =, <\>, like @@ -31,7 +31,7 @@ SQL is used by TDengine as the query language. Application programs can send SQL - Join query with timestamp alignment - Aggregate functions: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff -For example, below SQL statement can be executed in TDengine CLI `taos` to select the rows whose voltage column is bigger than 215 and limit the output to only 2 rows. +For example, the SQL statement below can be executed in TDengine CLI `taos` to select records with voltage greater than 215 and limit the output to only 2 rows. ```sql select * from d1001 where voltage > 215 order by ts desc limit 2; @@ -46,46 +46,46 @@ taos> select * from d1001 where voltage > 215 order by ts desc limit 2; Query OK, 2 row(s) in set (0.001100s) ``` -To meet the requirements in many use cases, some special functions have been added in TDengine, for example `twa` (Time Weighted Average), `spared` (The difference between the maximum and the minimum), `last_row` (the last row), more and more functions will be added to better perform in many use cases. Furthermore, continuous query is also supported in TDengine. +To meet the requirements of varied use cases, some special functions have been added in TDengine. Some examples are `twa` (Time Weighted Average), `spread` (The difference between the maximum and the minimum), and `last_row` (the last row). Furthermore, continuous query is also supported in TDengine. For detailed query syntax please refer to [Select](/taos-sql/select). ## Aggregation among Tables -In many use cases, there are always multiple kinds of data collection points. A new concept, called STable (abbreviated for super table), is used in TDengine to represent a kind of data collection points, and a table is used to represent a specific data collection point. Tags are used by TDengine to represent the static properties of data collection points. A specific data collection point has its own values for static properties. By specifying filter conditions on tags, aggregation can be performed efficiently among all the subtables created via the same STable, i.e. same kind of data collection points, can be. Aggregate functions applicable for tables can be used directly on STables, syntax is exactly same. +In most use cases, there are always multiple kinds of data collection points. A new concept, called STable (abbreviation for super table), is used in TDengine to represent one type of data collection point, and a subtable is used to represent a specific data collection point of that type. Tags are used by TDengine to represent the static properties of data collection points. A specific data collection point has its own values for static properties. By specifying filter conditions on tags, aggregation can be performed efficiently among all the subtables created via the same STable, i.e. same type of data collection points. 
Aggregate functions applicable for tables can be used directly on STables; the syntax is exactly the same. -In summary, for a STable, its subtables can be aggregated by a simple query on STable, it's kind of join operation. But tables belong to different STables could not be aggregated. +In summary, records across subtables can be aggregated by a simple query on their STable. It is like a join operation. However, tables belonging to different STables can not be aggregated. ### Example 1 -In TDengine CLI `taos`, use below SQL to get the average voltage of all the meters in BeiJing grouped by location. +In TDengine CLI `taos`, use the SQL below to get the average voltage of all the meters in California grouped by location. ``` taos> SELECT AVG(voltage) FROM meters GROUP BY location; avg(voltage) | location | ============================================================= - 222.000000000 | Beijing.Haidian | - 219.200000000 | Beijing.Chaoyang | + 222.000000000 | California.LosAngeles | + 219.200000000 | California.SanFrancisco | Query OK, 2 row(s) in set (0.002136s) ``` ### Example 2 -In TDengine CLI `taos`, use below SQL to get the number of rows and the maximum current in the past 24 hours from meters whose groupId is 2. +In TDengine CLI `taos`, use the SQL below to get the number of rows and the maximum current in the past 24 hours from meters whose groupId is 2. ``` taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h; - cunt(*) | max(current) | + count(*) | max(current) | ================================== 5 | 13.4 | Query OK, 1 row(s) in set (0.002136s) ``` -Join query is allowed between only the tables of same STable. In [Select](/taos-sql/select), all query operations are marked as whether it supports STable or not. +Join queries are only allowed between subtables of the same STable. In [Select](/taos-sql/select), all query operations are marked as to whether they support STables or not. ## Down Sampling and Interpolation -In IoT use cases, down sampling is widely used to aggregate the data by time range. `INTERVAL` keyword in TDengine can be used to simplify the query by time window. For example, below SQL statement can be used to get the sum of current every 10 seconds from meters table d1001. +In IoT use cases, down sampling is widely used to aggregate data by time range. The `INTERVAL` keyword in TDengine can be used to simplify the query by time window. For example, the SQL statement below can be used to get the sum of current every 10 seconds from meters table d1001. ``` taos> SELECT sum(current) FROM d1001 INTERVAL(10s); @@ -96,10 +96,10 @@ taos> SELECT sum(current) FROM d1001 INTERVAL(10s); Query OK, 2 row(s) in set (0.000883s) ``` -Down sampling can also be used for STable. For example, below SQL statement can be used to get the sum of current from all meters in BeiJing. +Down sampling can also be used for STable. For example, the below SQL statement can be used to get the sum of current from all meters in California. ``` -taos> SELECT SUM(current) FROM meters where location like "Beijing%" INTERVAL(1s); +taos> SELECT SUM(current) FROM meters where location like "California%" INTERVAL(1s); ts | sum(current) | ====================================================== 2018-10-03 14:38:04.000 | 10.199999809 | @@ -110,7 +110,7 @@ taos> SELECT SUM(current) FROM meters where location like "Beijing%" INTERVAL(1s Query OK, 5 row(s) in set (0.001538s) ``` -Down sampling also supports time offset. 
For example, below SQL statement can be used to get the sum of current from all meters but each time window must start at the boundary of 500 milliseconds. +Down sampling also supports time offset. For example, the below SQL statement can be used to get the sum of current from all meters but each time window must start at the boundary of 500 milliseconds. ``` taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a); @@ -124,7 +124,7 @@ taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a); Query OK, 5 row(s) in set (0.001521s) ``` -In many use cases, it's hard to align the timestamp of the data collected by each collection point. However, a lot of algorithms like FFT require the data to be aligned with same time interval and application programs have to handle by themselves in many systems. In TDengine, it's easy to achieve the alignment using down sampling. +In many use cases, it's hard to align the timestamp of the data collected by each collection point. However, a lot of algorithms like FFT require the data to be aligned with same time interval and application programs have to handle this by themselves. In TDengine, it's easy to achieve the alignment using down sampling. Interpolation can be performed in TDengine if there is no data in a time range. @@ -162,16 +162,16 @@ In the section describing [Insert](/develop/insert-data/sql-writing), a database :::note -1. With either REST connection or native connection, the above sample code work well. -2. Please be noted that `use db` can't be used in case of REST connection because it's stateless. +1. With either REST connection or native connection, the above sample code works well. +2. Please note that `use db` can't be used in case of REST connection because it's stateless. ::: ### Asynchronous Query -Besides synchronous query, asynchronous query API is also provided by TDengine to insert or query data more efficiently. With similar hardware and software environment, async API is 2~4 times faster than sync APIs. Async API works in non-blocking mode, which means an operation can be returned without finishing so that the calling thread can switch to other works to improve the performance of the whole application system. Async APIs perform especially better in case of poor network. +Besides synchronous queries, an asynchronous query API is also provided by TDengine to insert or query data more efficiently. With a similar hardware and software environment, the async API is 2~4 times faster than sync APIs. Async API works in non-blocking mode, which means an operation can be returned without finishing so that the calling thread can switch to other work to improve the performance of the whole application system. Async APIs perform especially better in the case of poor networks. -Please be noted that async query can only be used with native connection. +Please note that async query can only be used with a native connection. diff --git a/docs-en/07-develop/05-continuous-query.mdx b/docs-en/07-develop/05-continuous-query.mdx index 97e32a17ff325a9f67ac0a732be3dd72ccca8888..1aea5783fc8116a4e02a4b5345d341707cd399ea 100644 --- a/docs-en/07-develop/05-continuous-query.mdx +++ b/docs-en/07-develop/05-continuous-query.mdx @@ -1,18 +1,18 @@ --- sidebar_label: Continuous Query -description: "Continuous query is a query that's executed automatically according to predefined frequency to provide aggregate query capability by time window, it's actually a simplified time driven stream computing." 
+description: "Continuous query is a query that's executed automatically at a predefined frequency to provide aggregate query capability by time window. It is essentially simplified, time driven, stream computing." title: "Continuous Query" --- -Continuous query is a query that's executed automatically according to predefined frequency to provide aggregate query capability by time window, it's actually a simplified time driven stream computing. Continuous query can be performed on a table or STable in TDengine. The result of continuous query can be pushed to client or written back to TDengine. Each query is executed on a time window, which moves forward with time. The size of time window and the forward sliding time need to be specified with parameter `INTERVAL` and `SLIDING` respectively. +A continuous query is a query that's executed automatically at a predefined frequency to provide aggregate query capability by time window. It is essentially simplified, time driven, stream computing. A continuous query can be performed on a table or STable in TDengine. The results of a continuous query can be pushed to clients or written back to TDengine. Each query is executed on a time window, which moves forward with time. The size of time window and the forward sliding time need to be specified with parameter `INTERVAL` and `SLIDING` respectively. -Continuous query in TDengine is time driven, and can be defined using TAOS SQL directly without any extra operations. With continuous query, the result can be generated according to time window to achieve down sampling of original data. Once a continuous query is defined using TAOS SQL, the query is automatically executed at the end of each time window and the result is pushed back to client or written to TDengine. +A continuous query in TDengine is time driven, and can be defined using TAOS SQL directly without any extra operations. With a continuous query, the result can be generated based on a time window to achieve down sampling of the original data. Once a continuous query is defined using TAOS SQL, the query is automatically executed at the end of each time window and the result is pushed back to clients or written to TDengine. There are some differences between continuous query in TDengine and time window computation in stream computing: - The computation is performed and the result is returned in real time in stream computing, but the computation in continuous query is only started when a time window closes. For example, if the time window is 1 day, then the result will only be generated at 23:59:59. -- If a historical data row is written in to a time widow for which the computation has been finished, the computation will not be performed again and the result will not be pushed to client again either. If the result has been written into TDengine, there will be no update for the result. -- In continuous query, if the result is pushed to client, the client status is not cached on the server side and Exactly-once is not guaranteed by the server either. If the client program crashes, a new time window will be generated from the time where the continuous query is restarted. If the result is written into TDengine, the data written into TDengine can be guaranteed as valid and continuous. +- If a historical data row is written in to a time window for which the computation has already finished, the computation will not be performed again and the result will not be pushed to client applications again. 
If the results have already been written into TDengine, they will not be updated. +- In continuous query, if the result is pushed to a client, the client status is not cached on the server side and Exactly-once is not guaranteed by the server. If the client program crashes, a new time window will be generated from the time where the continuous query is restarted. If the result is written into TDengine, the data written into TDengine can be guaranteed as valid and continuous. ## Syntax @@ -30,15 +30,15 @@ SLIDING: The time step for which the time window moves forward each time ## How to Use -In this section the use case of meters will be used to introduce how to use continuous query. Assume the STable and sub tables have been created using below SQL statement. +In this section the use case of meters will be used to introduce how to use continuous query. Assume the STable and subtables have been created using the SQL statements below. ```sql create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int); -create table D1001 using meters tags ("Beijing.Chaoyang", 2); -create table D1002 using meters tags ("Beijing.Haidian", 2); +create table D1001 using meters tags ("California.SanFrancisco", 2); +create table D1002 using meters tags ("California.LosAngeles", 2); ``` -The average voltage for each time window of one minute with 30 seconds as the length of moving forward can be retrieved using below SQL statement. +The SQL statement below retrieves the average voltage for a one minute time window, with each time window moving forward by 30 seconds. ```sql select avg(voltage) from meters interval(1m) sliding(30s); @@ -50,13 +50,13 @@ Whenever the above SQL statement is executed, all the existing data will be comp select avg(voltage) from meters where ts > {startTime} interval(1m) sliding(30s); ``` -Another easier way for same purpose is prepend `create table {tableName} as` before the `select`. +An easier way to achieve this is to prepend `create table {tableName} as` before the `select`. ```sql create table avg_vol as select avg(voltage) from meters interval(1m) sliding(30s); ``` -A table named as `avg_vol` will be created automatically, then every 30 seconds the `select` statement will be executed automatically on the data in the past 1 minutes, i.e. the latest time window, and the result is written into table `avg_vol`. The client program just needs to query from table `avg_vol`. For example: +A table named as `avg_vol` will be created automatically, then every 30 seconds the `select` statement will be executed automatically on the data in the past 1 minute, i.e. the latest time window, and the result is written into table `avg_vol`. The client program just needs to query from table `avg_vol`. For example: ```sql taos> select * from avg_vol; @@ -68,16 +68,16 @@ taos> select * from avg_vol; 2020-07-29 13:39:00.000 | 223.0800000 | ``` -Please be noted that the minimum allowed time window is 10 milliseconds, and no upper limit. +Please note that the minimum allowed time window is 10 milliseconds, and there is no upper limit. -Besides, it's allowed to specify the start and end time of continuous query. If the start time is not specified, the timestamp of the first original row will be considered as the start time; if the end time is not specified, the continuous will be performed infinitely, otherwise it will be terminated once the end time is reached. 
For example, the continuous query in below SQL statement will be started from now and terminated one hour later. +It's possible to specify the start and end time of a continuous query. If the start time is not specified, the timestamp of the first row will be considered as the start time; if the end time is not specified, the continuous query will be performed indefinitely, otherwise it will be terminated once the end time is reached. For example, the continuous query in the SQL statement below will be started from now and terminated one hour later. ```sql create table avg_vol as select avg(voltage) from meters where ts > now and ts <= now + 1h interval(1m) sliding(30s); ``` -`now` in above SQL statement stands for the time when the continuous query is created, not the time when the computation is actually performed. Besides, to avoid the trouble caused by the delay of original data as much as possible, the actual computation in continuous query is also started with a little delay. That means, once a time window closes, the computation is not started immediately. Normally, the result can only be available a little time later, normally within one minute, after the time window closes. +`now` in the above SQL statement stands for the time when the continuous query is created, not the time when the computation is actually performed. To avoid the trouble caused by a delay in receiving data as much as possible, the actual computation in a continuous query is started after a little delay. That means, once a time window closes, the computation is not started immediately. Normally, the results become available shortly after the time window closes, usually within one minute. ## How to Manage -`show streams` command can be used in TDengine CLI `taos` to show all the continuous queries in the system, and `kill stream` can be used to terminate a continuous query. +`show streams` command can be used in the TDengine CLI `taos` to show all the continuous queries in the system, and `kill stream` can be used to terminate a continuous query. diff --git a/docs-en/07-develop/06-subscribe.mdx b/docs-en/07-develop/06-subscribe.mdx index 56f4ed83d8ebc6f21afbdd2eca2e01f11b313883..782fcdbaf221419dd231bd10958e26b8f4f856e5 100644 --- a/docs-en/07-develop/06-subscribe.mdx +++ b/docs-en/07-develop/06-subscribe.mdx @@ -1,6 +1,6 @@ --- -sidebar_label: Subscription -description: "Lightweight service for data subscription and pushing, the time series data inserted into TDengine continuously can be pushed automatically to the subscribing clients." +sidebar_label: Data Subscription +description: "Lightweight service for data subscription and publishing. Time series data inserted into TDengine continuously can be pushed automatically to subscribing clients." title: Data Subscription --- @@ -16,9 +16,9 @@ import CDemo from "./_sub_c.mdx"; ## Introduction -According to the time series nature of the data, data inserting in TDengine is similar to data publishing in message queues, they both can be considered as a new data record with timestamp is inserted into the system. Data is stored in ascending order of timestamp inside TDengine, so essentially each table in TDengine can be considered as a message queue. +Due to the nature of time series data, data insertion into TDengine is similar to data publishing in message queues. Data is stored in ascending order of timestamp inside TDengine, and so each table in TDengine can essentially be considered as a message queue.
-Lightweight service for data subscription and pushing is built in TDengine. With the API provided by TDengine, client programs can used `select` statement to subscribe the data from one or more tables. The subscription and and state maintenance is performed on the client side, the client programs polls the server to check whether there is new data, and if so the new data will be pushed back to the client side. If the client program is restarted, where to start for retrieving new data is up to the client side. +A lightweight service for data subscription and publishing is built into TDengine. With the API provided by TDengine, client programs can use `select` statements to subscribe to data from one or more tables. The subscription and state maintenance is performed on the client side. The client programs poll the server to check whether there is new data, and if so the new data will be pushed back to the client side. If the client program is restarted, where to start retrieving new data is up to the client side. There are 3 major APIs related to subscription provided in the TDengine client driver. @@ -28,11 +28,11 @@ taos_consume taos_unsubscribe ``` -For more details about these API please refer to [C/C++ Connector](/reference/connector/cpp). Their usage will be introduced below using the use case of meters, in which the schema of STable and sub tables please refer to the previous section "continuous query". Full sample code can be found [here](https://github.com/taosdata/TDengine/blob/master/examples/c/subscribe.c). +For more details about these APIs please refer to [C/C++ Connector](/reference/connector/cpp). Their usage will be introduced below using the use case of meters, in which the schema of STable and subtables from the previous section [Continuous Query](/develop/continuous-query) are used. Full sample code can be found [here](https://github.com/taosdata/TDengine/blob/master/examples/c/subscribe.c). -If we want to get notification and take some actions if the current exceeds a threshold, like 10A, from some meters, there are two ways: +If we want to get a notification and take some actions if the current exceeds a threshold, like 10A, from some meters, there are two ways: -The first way is to query on each sub table and record the last timestamp matching the criteria, then after some time query on the data later than recorded timestamp and repeat this process. The SQL statements for this way are as below. +The first way is to query each sub table and record the last timestamp matching the criteria. Then after some time, query the data later than the recorded timestamp, and repeat this process. The SQL statements for this way are as below. ```sql select * from D1001 where ts > {last_timestamp1} and current > 10; @@ -40,7 +40,7 @@ select * from D1002 where ts > {last_timestamp2} and current > 10; ... ``` -The above way works, but the problem is that the number of `select` statements increases with the number of meters grows. Finally the performance of both client side and server side will be unacceptable once the number of meters grows to a big enough number. +The above way works, but the problem is that the number of `select` statements increases with the number of meters. Additionally, the performance of both client side and server side will be unacceptable once the number of meters grows to a big enough number. 
A better way is to query on the STable, only one `select` is enough regardless of the number of meters, like below: @@ -48,9 +48,9 @@ A better way is to query on the STable, only one `select` is enough regardless o select * from meters where ts > {last_timestamp} and current > 10; ``` -However, how to choose `last_timestamp` becomes a new problem if using this way. Firstly, the timestamp when the data is generated is different from the timestamp when the data is inserted into the database, sometimes the difference between them may be very big. Secondly, the time when the data from different meters may arrives at the database may be different too. If the timestamp of the "slowest" meter is used as `last_timestamp` in the query, the data from other meters may be selected repeatedly; but if the timestamp of the "fasted" meters is used as `last_timestamp`, some data from other meters may be missed. +However, this presents a new problem in how to choose `last_timestamp`. First, the timestamp when the data is generated is different from the timestamp when the data is inserted into the database, sometimes the difference between them may be very big. Second, the time when the data from different meters arrives at the database may be different too. If the timestamp of the "slowest" meter is used as `last_timestamp` in the query, the data from other meters may be selected repeatedly; but if the timestamp of the "fastest" meter is used as `last_timestamp`, some data from other meters may be missed. -All the problems mentioned above can be resolved thoroughly using subscription provided by TDengine. +All the problems mentioned above can be resolved easily using the subscription functionality provided by TDengine. The first step is to create subscription using `taos_subscribe`. @@ -65,31 +65,33 @@ if (async) { } ``` -The subscription in TDengine can be either synchronous or asynchronous. In the above sample code, the value of variable `async` is determined from the CLI input, then it's used to create either an async or sync subscription. Sync subscription means the client program needs to invoke `taos_consume` to retrieve data, and async subscription means another thread created by `taos_subscribe` internally invokes `taos_consume` to retrieve data and pass the data to `subscribe_callback` for processing, `subscribe_callback` is a call back function provided by the client program and it's suggested not to do time consuming operation in the call back function. +The subscription in TDengine can be either synchronous or asynchronous. In the above sample code, the value of variable `async` is determined from the CLI input, then it's used to create either an async or sync subscription. Sync subscription means the client program needs to invoke `taos_consume` to retrieve data, and async subscription means another thread created by `taos_subscribe` internally invokes `taos_consume` to retrieve data and pass the data to `subscribe_callback` for processing. `subscribe_callback` is a callback function provided by the client program. You should not perform time consuming operations in the callback function. -The parameter `taos` is an established connection. There is nothing special in sync subscription mode. In async subscription, it should be exclusively by current thread, otherwise unpredictable error may occur. +The parameter `taos` is an established connection. Nothing special needs to be done for thread safety for synchronous subscription. 
For asynchronous subscription, `taos_subscribe` should be called exclusively by the current thread, to avoid unpredictable errors. -The parameter `sql` is a `select` statement in which `where` clause can be used to specify filter conditions. In our example, the data whose current exceeds 10A needs to be subscribed like below SQL statement: +The parameter `sql` is a `select` statement in which the `where` clause can be used to specify filter conditions. In our example, we can subscribe to the records in which the current exceeds 10A, with the following SQL statement: ```sql select * from meters where current > 10; ``` -Please be noted that, all the data will be processed because no start time is specified. If only the data from one day ago needs to be processed, a time related condition can be added: +Please note that all the data will be processed because no start time is specified. If we only want to process data for the past day, a time-related condition can be added: ```sql select * from meters where ts > now - 1d and current > 10; ``` -The parameter `topic` is the name of the subscription, it needs to be guaranteed unique in the client program, but it's not necessary to be globally unique because subscription is implemented in the APIs on client side. +The parameter `topic` is the name of the subscription. The client application must guarantee that the name is unique. However, it doesn't have to be globally unique because subscription is implemented in the APIs on the client side. -If the subscription named as `topic` doesn't exist, parameter `restart` would be ignored. If the subscription named as `topic` has been created before by the client program which then exited, when the client program is restarted to use this `topic`, parameter `restart` is used to determine retrieving data from beginning or from the last point where the subscription was broken. If the value of `restart` is **true** (i.e. a non-zero value), the data will be retrieved from beginning, or if it is **false** (i.e. zero), the data already consumed before will not be processed again. +If the subscription named `topic` doesn't exist, the parameter `restart` will be ignored. If the subscription named `topic` has been created before by the client program, when the client program is restarted with the subscription named `topic`, the parameter `restart` is used to determine whether to retrieve data from the beginning or from the last point where the subscription was broken. -The last parameter of `taos_subscribe` is the polling interval in unit of millisecond. In sync mode, if the time difference between two continuous invocations to `taos_consume` is smaller than the interval specified by `taos_subscribe`, `taos_consume` would be blocked until the interval is reached. In async mode, this interval is the minimum interval between two invocations to the call back function. +If the value of `restart` is **true** (i.e. a non-zero value), data will be retrieved from the beginning. If it is **false** (i.e. zero), the data already consumed before will not be processed again. -The last second parameter of `taos_subscribe` is used to pass arguments to the call back function. `taos_subscribe` doesn't process this parameter and simply passes it to the call back function. This parameter is simply ignored in sync mode. +The last parameter of `taos_subscribe` is the polling interval in milliseconds.
In sync mode, if the time difference between two continuous invocations to `taos_consume` is smaller than the interval specified by `taos_subscribe`, `taos_consume` will be blocked until the interval is reached. In async mode, this interval is the minimum interval between two invocations to the callback function. -After a subscription is created, its data can be consumed and processed, below is the sample code of how to consume data in sync mode, in the else part if `if (async)`. +The second-to-last parameter of `taos_subscribe` is used to pass arguments to the callback function. `taos_subscribe` doesn't process this parameter and simply passes it to the callback function. This parameter is ignored in sync mode. + +After a subscription is created, its data can be consumed and processed. Shown below is the sample code to consume data in sync mode, in the else condition of `if (async)`. ```c if (async) { @@ -106,7 +108,7 @@ if (async) { } ``` -In the above sample code, there is an infinite loop, each time carriage return is entered `taos_consume` is invoked, the return value of `taos_consume` is the selected result set, exactly as the input of `taos_use_result`, in the above sample `print_result` is used instead to simplify the sample. Below is the implementation of `print_result`. +In the above sample code in the else condition, there is an infinite loop. Each time a carriage return is entered, `taos_consume` is invoked. The return value of `taos_consume` is the selected result set. In the above sample, `print_result` is used to simplify the printing of the result set. It is similar to `taos_use_result`. Below is the implementation of `print_result`. ```c void print_result(TAOS_RES* res, int blockFetch) { @@ -133,9 +135,9 @@ void print_result(TAOS_RES* res, int blockFetch) { } ``` -In the above code `taos_print_row` is used to process the data consumed. All the matching rows will be printed. +In the above code `taos_print_row` is used to process the data consumed. All matching rows are printed. -In async mode, the data consuming is simpler as below. +In async mode, consuming data is simpler as shown below. ```c void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { @@ -149,22 +151,22 @@ void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { taos_unsubscribe(tsub, keep); ``` -The second parameter `keep` is used to specify whether to keep the subscription progress on the client sde. If it is **false**, i.e. **0**, then subscription will be restarted from beginning regardless of the `restart` parameter's value in when `taos_subscribe` is invoked again. The subscription progress information is stored in _{DataDir}/subscribe/_ , under which there is a file with same name as `topic` for each subscription, the subscription will be restarted from beginning if the corresponding progress file is removed. +The second parameter `keep` is used to specify whether to keep the subscription progress on the client side. If it is **false**, i.e. **0**, then the subscription will be restarted from the beginning regardless of the `restart` parameter's value when `taos_subscribe` is invoked again. The subscription progress information is stored in _{DataDir}/subscribe/_, under which there is a file with the same name as `topic` for each subscription (Note: The default value of `DataDir` in the `taos.cfg` file is **/var/lib/taos/**. However, **/var/lib/taos/** does not exist on a Windows server,
so you need to change the `DataDir` value to a corresponding existing directory.). The subscription will be restarted from the beginning if the corresponding progress file is removed. Now let's see the effect of the above sample code, assuming the prerequisites below have been met. - The sample code has been downloaded to the local system - TDengine has been installed and launched properly on the same system -- The database, STable, sub tables required in the sample code have been ready +- The database, STable, and subtables required in the sample code are ready -It's ready to launch below command in the directory where the sample code resides to compile and start the program. +Launch the command below in the directory where the sample code resides to compile and start the program. ```bash make ./subscribe -sql='select * from meters where current > 10;' ``` -After the program is started, open another terminal and launch TDengine CLI `taos`, then use below SQL commands to insert a row whose current is 12A into table **D1001**. +After the program is started, open another terminal and launch TDengine CLI `taos`, then use the below SQL commands to insert a row whose current is 12A into table **D1001**. ```sql use test; @@ -175,7 +177,7 @@ Then, this row of data will be shown by the example program on the first termina ## Examples -Below example program demonstrates how to subscribe the data rows whose current exceeds 10A using connectors. +The example program below demonstrates how to subscribe, using connectors, to data rows in which current exceeds 10A. ### Prepare Data @@ -187,8 +189,8 @@ taos> use power; # create super table "meters" taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int); # create tables using the schema defined by super table "meters" -taos> create table d1001 using meters tags ("Beijing.Chaoyang", 2); -taos> create table d1002 using meters tags ("Beijing.Haidian", 2); +taos> create table d1001 using meters tags ("California.SanFrancisco", 2); +taos> create table d1002 using meters tags ("California.LosAngeles", 2); # insert some rows taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1); taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1); @@ -196,11 +198,11 @@ taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08 taos> select * from meters where current > 10; ts | current | voltage | phase | location | groupid | =========================================================================================================== - 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | Beijing.Haidian | 2 | - 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | Beijing.Haidian | 2 | - 2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | Beijing.Chaoyang | 2 | - 2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | Beijing.Chaoyang | 2 | - 2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | Beijing.Chaoyang | 2 | + 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | California.LosAngeles | 2 | + 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | California.LosAngeles | 2 | + 2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | California.SanFrancisco | 2 | + 2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | California.SanFrancisco | 2 | + 2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | California.SanFrancisco | 2 | Query OK, 5 row(s) in set (0.004896s)
``` @@ -232,14 +234,14 @@ Query OK, 5 row(s) in set (0.004896s) ### Run the Examples -The example programs firstly consume all historical data matching the criteria. +The example programs first consume all historical data matching the criteria. ```bash -ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2 -ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: Beijing.Chaoyang groupid : 2 -ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2 -ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2 -ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2 +ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2 +ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: California.SanFrancisco groupid : 2 +ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2 +ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2 +ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2 ``` Next, use TDengine CLI to insert a new row. @@ -250,8 +252,8 @@ taos> use power; taos> insert into d1001 values(now, 12.4, 220, 1); ``` -Because the current in inserted row exceeds 10A, it will be consumed by the example program. +Because the current in the inserted row exceeds 10A, it will be consumed by the example program. ``` -ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid: 2 +ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: California.SanFrancisco groupid: 2 ``` diff --git a/docs-en/07-develop/07-cache.md b/docs-en/07-develop/07-cache.md index 13db6c363802abed290cfc4d4466d40e48852f3d..743452faff6a2be8466318a7dab61a44e33c3664 100644 --- a/docs-en/07-develop/07-cache.md +++ b/docs-en/07-develop/07-cache.md @@ -4,16 +4,16 @@ title: Cache description: "The latest row of each table is kept in cache to provide high performance query of latest state." --- -The cache management policy in TDengine is First-In-First-Out (FIFO), which is also known as insert driven cache management policy and different from read driven cache management, i.e. Least-Recent-Used (LRU). It simply stores the latest data in cache and flushes the oldest data in cache to disk when the cache usage reaches a threshold. In IoT use cases, the most cared about data is the latest data, i.e. current state. The cache policy in TDengine is based the nature of IoT data. +The cache management policy in TDengine is First-In-First-Out (FIFO). FIFO is also known as an insert-driven cache management policy and it is different from read-driven cache management, which is more commonly known as Least-Recently-Used (LRU). FIFO simply stores the latest data in cache and flushes the oldest data in cache to disk when the cache usage reaches a threshold. In IoT use cases, it is the current state, i.e. the latest or most recent data, that is important. The cache policy in TDengine, like much of the design and architecture of TDengine, is based on the nature of IoT data. -Caching the latest data provides the capability of retrieving data in milliseconds. With this capability, TDengine can be configured properly to be used as caching system without deploying another separate caching system to simplify the system architecture and minimize the operation cost.
The cache will be emptied after TDengine is restarted, TDengine doesn't reload data from disk into cache like a real key-value caching system. +Caching the latest data provides the capability of retrieving data in milliseconds. With this capability, TDengine can be configured properly to be used as a caching system without deploying another separate caching system. This simplifies the system architecture and minimizes operational costs. The cache is emptied after TDengine is restarted. Unlike a typical key-value caching system, TDengine does not reload data from disk into cache after a restart. -The memory space used by TDengine cache is fixed in size, according to the configuration based on application requirement and system resources. Independent memory pool is allocated for and managed by each vnode (virtual node) in TDengine, there is no sharing of memory pools between vnodes. All the tables belonging to a vnode share all the cache memory of the vnode. +The memory space used by the TDengine cache is fixed in size and configurable. It should be allocated based on application requirements and system resources. An independent memory pool is allocated for and managed by each vnode (virtual node) in TDengine. There is no sharing of memory pools between vnodes. All the tables belonging to a vnode share all the cache memory of the vnode. -Memory pool is divided into blocks and data is stored in row format in memory and each block follows FIFO policy. The size of each block is determined by configuration parameter `cache`, the number of blocks for each vnode is determined by `blocks`. For each vnode, the total cache size is `cache * blocks`. It's better to set the size of each block to hold at least tends of rows. +The memory pool is divided into blocks; data is stored in memory in row format, and each block follows the FIFO policy. The size of each block is determined by the configuration parameter `cache` and the number of blocks for each vnode is determined by the parameter `blocks`. For each vnode, the total cache size is `cache * blocks`; for example, with the default values of 16 MB for `cache` and 6 for `blocks`, each vnode has 96 MB of cache. To be efficient, each cache block should be large enough to hold at least dozens of records. -`last_row` function can be used to retrieve the last row of a table or a STable to quickly show the current state of devices on monitoring screen. For example below SQL statement retrieves the latest voltage of all meters in Chaoyang district of Beijing. +The `last_row` function can be used to retrieve the last row of a table or a STable to quickly show the current state of devices on a monitoring screen. For example, the SQL statement below retrieves the latest voltage of all meters in San Francisco, California.
```sql -select last_row(voltage) from meters where location='Beijing.Chaoyang'; +select last_row(voltage) from meters where location='California.SanFrancisco'; ``` diff --git a/docs-en/07-develop/08-udf.md b/docs-en/07-develop/08-udf.md index 61639e34404477d3bb5785da129a1d922a4d020e..49bc95bd91a4c31d42d2b21ef05d69225f1bd963 100644 --- a/docs-en/07-develop/08-udf.md +++ b/docs-en/07-develop/08-udf.md @@ -1,24 +1,31 @@ --- sidebar_label: UDF -title: User Defined Functions -description: "Scalar functions and aggregate functions developed by users can be utilized by the query framework to expand the query capability" +title: User Defined Functions (UDF) +description: "Scalar functions and aggregate functions developed by users can be utilized by the query framework to expand query capability" --- -In some use cases, the query capability required by application programs can't be achieved directly by builtin functions. With UDF, the functions developed by users can be utilized by query framework to meet some special requirements. UDF normally takes one column of data as input, but can also support the result of sub query as input. +In some use cases, built-in functions are not adequate for the query capability required by application programs. With UDF, the functions developed by users can be utilized by the query framework to meet business and application requirements. A UDF normally takes one column of data as input, but can also support the result of a sub-query as input. -From version 2.2.0.0, UDF programmed in C/C++ language can be supported by TDengine. +From version 2.2.0.0, UDFs written in C/C++ are supported by TDengine. -Two kinds of functions can be implemented by UDF: scalar function and aggregate function. -## Define UDF +## Types of UDF + +Two kinds of functions can be implemented by UDF: scalar functions and aggregate functions. + +Scalar functions return one row of output for each row of input, while aggregate functions return either 0 or 1 row. + +In the case of a scalar function, you only have to implement the "normal" function template. + +In the case of an aggregate function, in addition to the "normal" function, you also need to implement the "merge" and "finalize" function templates even if the implementation is empty. This will become clear in the sections below. ### Scalar Function -Below function template can be used to define your own scalar function. +As mentioned earlier, a scalar UDF only has to implement the "normal" function template. The function template below can be used to define your own scalar function. `void udfNormalFunc(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput, int* numOfOutput, short otype, short obytes, SUdfInit* buf)` -`udfNormalFunc` is the place holder of function name, a function implemented based on the above template can be used to perform scalar computation on data rows. The parameters are fixed to control the data exchange between UDF and TDengine. +`udfNormalFunc` is the placeholder for a function name. A function implemented based on the above template can be used to perform scalar computation on data rows. The parameters are fixed to control the data exchange between UDF and TDengine. - Definitions of the parameters: @@ -30,20 +37,24 @@ Below function template can be used to define your own scalar function.
- numOfRows:the number of rows in the input data - ts: the column of timestamp corresponding to the input data - dataOutput:the buffer for output data, total size is `oBytes * numberOfRows` - - interBuf:the buffer for intermediate result, its size is specified by `BUFSIZE` parameter when creating a UDF. It's normally used when the intermediate result is not same as the final result, it's allocated and freed by TDengine. + - interBuf:the buffer for an intermediate result. Its size is specified by the `BUFSIZE` parameter when creating a UDF. It's normally used when the intermediate result is not same as the final result. This buffer is allocated and freed by TDengine. - tsOutput:the column of timestamps corresponding to the output data; it can be used to output timestamp together with the output data if it's not NULL - numOfOutput:the number of rows in output data - buf:for the state exchange between UDF and TDengine - [add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) is one example of the simplest UDF implementations, i.e. one instance of the above `udfNormalFunc` template. It adds one to each value of a column passed in which can be filtered using `where` clause and outputs the result. + [add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) is one example of a very simple UDF implementation, i.e. one instance of the above `udfNormalFunc` template. It adds one to each value of a passed in column, which can be filtered using the `where` clause, and outputs the result. ### Aggregate Function -Below function template can be used to define your own aggregate function. +For aggregate UDF, as mentioned earlier you must implement a "normal" function template (described above) and also implement the "merge" and "finalize" templates. -`void abs_max_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf)` +#### Merge Function Template -`udfMergeFunc` is the place holder of function name, the function implemented with the above template is used to aggregate the intermediate result, only can be used in the aggregate query for STable. +The function template below can be used to define your own merge function for an aggregate UDF. + +`void udfMergeFunc(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf)` + +`udfMergeFunc` is the place holder for a function name. The function implemented with the above template is used to aggregate intermediate results and can only be used in the aggregate query for STable. Definitions of the parameters: @@ -53,17 +64,11 @@ Definitions of the parameters: - numOfOutput:number of rows in the output data - buf:for the state exchange between UDF and TDengine -[abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) is an user defined aggregate function to get the maximum from the absolute value of a column. - -The internal processing is that the data affected by the select statement will be divided into multiple row blocks and `udfNormalFunc`, i.e. `abs_max` in this case, is performed on each row block to generate the intermediate of each sub table, then `udfMergeFunc`, i.e. `abs_max_merge` in this case, is performed on the intermediate result of sub tables to aggregate to generate the final or intermediate result of STable. The intermediate result of STable is finally processed by `udfFinalizeFunc` to generate the final result, which contain either 0 or 1 row. 
- -Other typical scenarios, like covariance, can also be achieved by aggregate UDF. +#### Finalize Function Template -### Finalize +The function template below can be used to finalize the result of your own UDF; it is normally needed when `interBuf` is used. -Below function template can be used to finalize the result of your own UDF, normally used when interBuf is used. - -`void abs_max_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf)` +`void udfFinalizeFunc(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf)` `udfFinalizeFunc` is the placeholder for a function name; the definitions of the parameters are as below: @@ -72,47 +77,64 @@ Below function template can be used to finalize the result of your own UDF, norm - numOfOutput:number of output data, can only be 0 or 1 for aggregate function - buf:for state exchange between UDF and TDengine -## UDF Conventions +### Example abs_max.c + +[abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) is an example of a user defined aggregate function to get the maximum from the absolute values of a column. + +The internal processing happens as follows. The results of the select statement are divided into multiple row blocks and `udfNormalFunc`, i.e. `abs_max` in this case, is performed on each row block to generate the intermediate results for each sub table. Then `udfMergeFunc`, i.e. `abs_max_merge` in this case, is performed on the intermediate result of sub tables to aggregate and generate the final or intermediate result of STable. The intermediate result of STable is finally processed by `udfFinalizeFunc`, i.e. `abs_max_finalize` in this example, to generate the final result, which contains either 0 or 1 row. + +Other typical aggregation functions, such as covariance, can also be implemented using an aggregate UDF. -The naming of 3 kinds of UDF, i.e. udfNormalFunc, udfMergeFunc, and udfFinalizeFunc is required to have same prefix, i.e. the actual name of udfNormalFunc, which means udfNormalFunc doesn't need a suffix following the function name. While udfMergeFunc should be udfNormalFunc followed by `_merge`, udfFinalizeFunc should be udfNormalFunc followed by `_finalize`. The naming convention is part of UDF framework, TDengine follows this convention to invoke corresponding actual functions.\ +## UDF Naming Conventions -According to the kind of UDF to implement, the functions that need to be implemented are different. +The naming convention for the 3 kinds of function templates required by UDF is as follows: + - udfNormalFunc, udfMergeFunc, and udfFinalizeFunc are required to have the same prefix, i.e. the actual name of udfNormalFunc. The udfNormalFunc doesn't need a suffix following the function name. + - udfMergeFunc should be udfNormalFunc followed by `_merge` + - udfFinalizeFunc should be udfNormalFunc followed by `_finalize`. + +The naming convention is part of TDengine's UDF framework. TDengine follows this convention to invoke the corresponding actual functions. -- Scalar function:udfNormalFunc is required -- Aggregate function:udfNormalFunc, udfMergeFunc (if query on STable) and udfFinalizeFunc are required -To be more accurate, assuming we want to implement a UDF named "foo". If the function is a scalar function, what we really need to implement is `foo`; if the function is aggregate function, we need to implement `foo`, `foo_merge`, and `foo_finalize`.
For aggregate UDF, even though one of the three functions is not necessary, there must be an empty implementation. +- Scalar function:udfNormalFunc is required. +- Aggregate function:udfNormalFunc, udfMergeFunc (if query on STable) and udfFinalizeFunc are required. + +For clarity, assuming we want to implement a UDF named "foo": +- If the function is a scalar function, we only need to implement the "normal" function template and it should be named simply `foo`. +- If the function is an aggregate function, we need to implement `foo`, `foo_merge`, and `foo_finalize`. Note that for aggregate UDF, even though one of the three functions is not necessary, there must be an empty implementation. ## Compile UDF -The source code of UDF in C can't be utilized by TDengine directly. UDF can only be loaded into TDengine after compiling to dynamically linked library. +The source code of a UDF in C can't be utilized by TDengine directly. A UDF can only be loaded into TDengine after being compiled to a dynamically linked library (DLL). -For example, the example UDF `add_one.c` mentioned in previous sections need to be compiled into DLL using below command on Linux Shell. +For example, the example UDF `add_one.c` mentioned earlier can be compiled into a DLL using the command below in a Linux shell. ```bash gcc -g -O0 -fPIC -shared add_one.c -o add_one.so ``` -The generated DLL file `dd_one.so` can be used later when creating UDF. It's recommended to use GCC not older than 7.5. +The generated DLL file `add_one.so` can be used later when creating a UDF. It's recommended to use GCC not older than 7.5. ## Create and Use UDF +When a UDF is created in a TDengine instance, it is available across the databases in that instance. + ### Create UDF -SQL command can be executed on the same hos where the generated UDF DLL resides to load the UDF DLL into TDengine, this operation can't be done through REST interface or web console. Once created, all the clients of the current TDengine can use these UDF functions in their SQL commands. UDF are stored in the management node of TDengine. The UDFs loaded in TDengine would be still available after TDengine is restarted. +An SQL command can be executed on the host where the generated UDF DLL resides to load the UDF DLL into TDengine. This operation cannot be done through the REST interface or web console. Once created, any client of the current TDengine can use these UDF functions in their SQL commands. UDFs are stored in the management node of TDengine and remain available after TDengine is restarted. -When creating UDF, it needs to be clarified as either scalar function or aggregate function. If the specified type is wrong, the SQL statements using the function would fail with error. Besides, the input type and output type don't need to be same in UDF, but the input data type and output data type need to be consistent with the UDF definition. +When creating a UDF, the type of UDF, i.e. scalar function or aggregate function, must be specified. If the specified type is wrong, the SQL statements using the function will fail with errors. The input type and output type don't need to be the same in a UDF, but the input data type and output data type must be consistent with the UDF definition.
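Before looking at the exact `CREATE FUNCTION` syntax, it may help to see what a complete, if hypothetical, scalar UDF source file looks like. The sketch below follows the `udfNormalFunc` template shown earlier; the function name `times_two` and the assumption that the input column is INT are illustrative, and null handling is omitted for brevity.

```c
// times_two.c - a hypothetical scalar UDF following the udfNormalFunc template.
typedef struct SUdfInit SUdfInit;  // state handle managed by TDengine; opaque in this sketch

void times_two(char* data, short itype, short ibytes, int numOfRows,
               long long* ts, char* dataOutput, char* interBuf, char* tsOutput,
               int* numOfOutput, short otype, short obytes, SUdfInit* buf) {
  int* in  = (int*)data;        // interpret the input rows as INT values (assumption)
  int* out = (int*)dataOutput;  // one output value is produced per input row
  for (int i = 0; i < numOfRows; i++) {
    out[i] = in[i] * 2;
  }
  *numOfOutput = numOfRows;     // a scalar UDF emits as many rows as it receives
}
```

Compiled with the same `gcc -g -O0 -fPIC -shared` command shown above, the resulting `times_two.so` could then be registered with the `CREATE FUNCTION` statement described next; per the naming rules, the function name, the DLL name, and the name used in SQL would all have to be `times_two`.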
- Create Scalar Function ```sql -CREATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) [ BUFSIZE B ]; +CREATE FUNCTION userDefinedFunctionName AS "/absolute/path/to/userDefinedFunctionName.so" OUTPUTTYPE outputtype [BUFSIZE B]; ``` -- ids(X):the function name to be sued in SQL statement, must be consistent with the function name defined by `udfNormalFunc` -- ids(Y):the absolute path of the DLL file including the implementation of the UDF, the path needs to be quoted by single or double quotes -- typename(Z):the output data type, the value is the literal string of the type -- B:the size of intermediate buffer, in bytes; it's an optional parameter and the range is [0,512] +- userDefinedFunctionName:the function name to be used in SQL statements, which must be consistent with the function name defined by `udfNormalFunc` and is also the name of the compiled DLL (.so file). +- path:the absolute path of the DLL file including the name of the shared object file (.so). The path must be quoted with single or double quotes. +- outputtype:the output data type, the value is the literal string of the supported TDengine data type. +- B:the size of intermediate buffer, in bytes; it is an optional parameter and the range is [0,512]. For example, the SQL statement below can be used to create a UDF from `add_one.so`. @@ -123,17 +145,17 @@ CREATE FUNCTION add_one AS "/home/taos/udf_example/add_one.so" OUTPUTTYPE INT; - Create Aggregate Function ```sql -CREATE AGGREGATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) [ BUFSIZE B ]; +CREATE AGGREGATE FUNCTION userDefinedFunctionName AS "/absolute/path/to/userDefinedFunctionName.so" OUTPUTTYPE outputtype [ BUFSIZE B ]; ``` -- ids(X):the function name to be sued in SQL statement, must be consistent with the function name defined by `udfNormalFunc` -- ids(Y):the absolute path of the DLL file including the implementation of the UDF, the path needs to be quoted by single or double quotes -- typename(Z):the output data type, the value is the literal string of the type +- userDefinedFunctionName:the function name to be used in SQL statements, which must be consistent with the function name defined by `udfNormalFunc` and is also the name of the compiled DLL (.so file). +- path:the absolute path of the DLL file including the name of the shared object file (.so). The path needs to be quoted by single or double quotes. +- outputtype:the output data type, the value is the literal string of the type - B:the size of intermediate buffer, in bytes; it's an optional parameter and the range is [0,512] For details about how to use the intermediate result, please refer to the example program [demo.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/demo.c). -For example, below SQL statement can be used to create a UDF rom `demo.so`. +For example, the SQL statement below can be used to create a UDF from `demo.so`. ```sql CREATE AGGREGATE FUNCTION demo AS "/home/taos/udf_example/demo.so" OUTPUTTYPE DOUBLE bufsize 14; @@ -176,11 +198,11 @@ In current version there are some restrictions for UDF 1. Only Linux is supported when creating and invoking UDF for both client side and server side 2. UDF can't be mixed with builtin functions 3. Only one UDF can be used in a SQL statement -4. Single column is supported as input for UDF +4. Only a single column is supported as input for UDF 5. Once created successfully, a UDF is persisted in the MNode of TDengine 6. UDF can't be created through the REST interface 7.
The function name used when creating UDF in SQL must be consistent with the function name defined in the DLL, i.e. the name defined by `udfNormalFunc` -8. The name name of UDF name should not conflict with any of builtin functions +8. The name of a UDF should not conflict with any of TDengine's built-in functions ## Examples diff --git a/docs-en/07-develop/index.md b/docs-en/07-develop/index.md index 122dd0d870ac42b62c4f9e694cf79eec3ca122a5..e3f55f290753f79ac1708337082ce90bb050b21f 100644 --- a/docs-en/07-develop/index.md +++ b/docs-en/07-develop/index.md @@ -2,15 +2,15 @@ title: Developer Guide --- -To develop an application using TDengine to process time-series data, we recommend taking the following steps: +To develop an application to process time-series data using TDengine, we recommend taking the following steps: -1. Choose the way for connection to TDengine. No matter what programming language you use, you can always use the REST interface to access TDengine, but you can also use connectors unique to each programming language. -2. Design the data model based on your own application scenarios. Learn the [concepts](/concept/) of TDengine including "one table for one data collection point" and the "super table" concept; learn about static labels, collected metrics, and subtables. According to the data characteristics, you may decide to create one or more databases, and you should design the STable schema to fit your data. -3. Decide how to insert data. TDengine supports writing using standard SQL, but also supports schemaless writing, so that data can be written directly without creating tables manually. -4. Based on business requirements, find out what SQL query statements need to be written. +1. Choose the method to connect to TDengine. No matter what programming language you use, you can always use the REST interface to access TDengine, but you can also use connectors unique to each programming language. +2. Design the data model based on your own use cases. Learn the [concepts](/concept/) of TDengine including "one table for one data collection point" and the "super table" (STable) concept; learn about static labels, collected metrics, and subtables. Depending on the characteristics of your data and your requirements, you may decide to create one or more databases, and you should design the STable schema to fit your data. +3. Decide how you will insert data. TDengine supports writing using standard SQL, but also supports schemaless writing, so that data can be written directly without creating tables manually. +4. Based on business requirements, find out what SQL query statements need to be written. You may be able to repurpose any existing SQL. 5. If you want to run real-time analysis based on time series data, including various dashboards, it is recommended that you use the TDengine continuous query feature instead of deploying complex streaming processing systems such as Spark or Flink. 6. If your application has modules that need to consume inserted data, and they need to be notified when new data is inserted, it is recommended that you use the data subscription function provided by TDengine without the need to deploy Kafka. -7. In many scenarios (such as fleet management), the application needs to obtain the latest status of each data collection point. It is recommended that you use the cache function of TDengine instead of deploying Redis separately. +7. 
In many use cases (such as fleet management), the application needs to obtain the latest status of each data collection point. It is recommended that you use the cache function of TDengine instead of deploying Redis separately. 8. If you find that the SQL functions of TDengine cannot meet your requirements, then you can use user-defined functions to solve the problem. This section is organized in the order described above. For ease of understanding, TDengine provides sample code for each supported programming language for each function. If you want to learn more about the use of SQL, please read the [SQL manual](/taos-sql/). For a more in-depth understanding of the use of each connector, please read the [Connector Reference Guide](/reference/connector/). If you also want to integrate TDengine with third-party systems, such as Grafana, please refer to the [third-party tools](/third-party/). diff --git a/docs-en/10-cluster/01-deploy.md b/docs-en/10-cluster/01-deploy.md index 8c921797ec038fb8afbf382a980b8f7a197fa898..200da1be3f8185818bd21dd3fcdc78c124a36831 100644 --- a/docs-en/10-cluster/01-deploy.md +++ b/docs-en/10-cluster/01-deploy.md @@ -6,29 +6,35 @@ title: Deployment ### Step 1 -The FQDN of all hosts need to be setup properly, all the FQDNs need to be configured in the /etc/hosts of each host. It must be guaranteed that each FQDN can be accessed (by ping, for example) from any other hosts. +The FQDN of all hosts must be set up properly. For example, FQDNs may have to be configured in the /etc/hosts file on each host. You must confirm that each FQDN can be accessed from any other host. For example, you can do this by using the `ping` command. -On each host command `hostname -f` can be executed to get the hostname. `ping` command can be executed on each host to check whether any other host is accessible from it. If any host is not accessible, the network configuration, like /etc/hosts or DNS configuration, need to be checked and revised to make any two hosts accessible to each other. +To get the hostname on any host, the command `hostname -f` can be executed. The `ping` command can be executed on each host to check whether any other host is accessible from it. If any host is not accessible, the network configuration, like /etc/hosts or DNS configuration, needs to be checked and revised, to make any two hosts accessible to each other. :::note -- The host where the client program runs also needs to configured properly for FQDN, to make sure all hosts for client or server can be accessed from any other. In other words, the hosts where the client is running are also considered as a part of the cluster. +- The host where the client program runs also needs to be configured properly for FQDN, to make sure all hosts for client or server can be accessed from any other. In other words, the hosts where the client is running are also considered as a part of the cluster. -It's suggested to disable the firewall for all hosts in the cluster. At least TCP/UDP for port 6030~6042 need to be open if firewall is enabled. +- Please ensure that your firewall rules do not block TCP/UDP on ports 6030-6042 on all hosts in the cluster. ::: ### Step 2 -If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling please refer to [Install and Uninstall](/operation/pkg-install). To clean up the data, please use `rm -rf /var/lib/taos/\*` assuming the `dataDir` is configured as `/var/lib/taos`.
+If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling please refer to [Install and Uninstall](/operation/pkg-install). To clean up the data, please use `rm -rf /var/lib/taos/\*` assuming the `dataDir` is configured as `/var/lib/taos`. + +:::note + +As a best practice, before cleaning up any data files or directories, please ensure that your data has been backed up correctly, if required by your data integrity, backup, security, or other standard operating protocols (SOP). + +::: ### Step 3 -Now it's time to install TDengine on all hosts without starting `taosd`, the versions on all hosts should be same. If it's prompted to input the existing TDengine cluster, simply press carriage return to ignore it. `install.sh -e no` can also be used to disable this prompt. For details please refer to [Install and Uninstall](/operation/pkg-install). +Now it's time to install TDengine on all hosts, but without starting `taosd`. Note that the versions on all hosts should be the same. If you are prompted to input the existing TDengine cluster, simply press carriage return to ignore the prompt. `install.sh -e no` can also be used to disable this prompt. For details please refer to [Install and Uninstall](/operation/pkg-install). ### Step 4 -Now each physical node (referred to as `dnode` hereinafter, it's abbreviation for "data node") of TDengine need to be configured properly. Please be noted that one dnode doesn't stand for one host, multiple TDengine nodes can be started on single host as long as they are configured properly without conflicting. More specifically each instance of the configuration file `taos.cfg` stands for a dnode. Assuming the first dnode of TDengine cluster is "h1.taosdata.com:6030", its `taos.cfg` is configured as following. +Now each physical node (referred to hereinafter as `dnode`, which is an abbreviation for "data node") of TDengine needs to be configured properly. Please note that one dnode doesn't stand for one host. Multiple TDengine dnodes can be started on a single host as long as they are configured properly without conflicting. More specifically, each instance of the configuration file `taos.cfg` stands for a dnode. Assuming the first dnode of the TDengine cluster is "h1.taosdata.com:6030", its `taos.cfg` is configured as following. ```c // firstEp is the end point to connect to when any dnode starts @@ -44,9 +50,9 @@ serverPort 6030 #arbitrator ha.taosdata.com:6042 ``` -`firstEp` and `fqdn` must be configured properly. In `taos.cfg` of all dnodes in TDengine cluster, `firstEp` must be configured to point to same address, i.e. the first dnode of the cluster. `fqdn` and `serverPort` compose the address of each node itself. If you want to start multiple TDengine dnodes on a single host, please also make sure all other configurations like `dataDir`, `logDir`, and other resources related parameters are not conflicting. +`firstEp` and `fqdn` must be configured properly. In `taos.cfg` of all dnodes in the TDengine cluster, `firstEp` must be configured to point to the same address, i.e. the first dnode of the cluster. `fqdn` and `serverPort` compose the address of each node itself. If you want to start multiple TDengine dnodes on a single host, please make sure all other configurations like `dataDir`, `logDir`, and other resource-related parameters are not conflicting; a sketch of such a configuration follows.
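As a hypothetical illustration of the non-conflicting requirement, a second dnode started on the same host as the first one might use a configuration like the one below. The port number and directory paths are invented for this sketch; the only requirement is that they differ from those used by the first dnode.

```c
// hypothetical taos.cfg for a second dnode sharing the host h1.taosdata.com
firstEp h1.taosdata.com:6030

// same host as the first dnode, so the FQDN is unchanged
fqdn h1.taosdata.com

// a different port than the first dnode's 6030 (illustrative value)
serverPort 7030

// non-conflicting data and log directories (illustrative paths)
dataDir /var/lib/taos2
logDir /var/log/taos2
```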
-For all the dnodes in a TDengine cluster, below parameters must be configured as exactly same, any node whose configuration is different from dnodes already in the cluster can't join the cluster. +For all the dnodes in a TDengine cluster, the below parameters must be configured exactly the same; any node whose configuration is different from dnodes already in the cluster can't join the cluster. | **#** | **Parameter** | **Definition** | | ----- | ------------------ | --------------------------------------------------------------------------------- | @@ -61,15 +67,17 @@ For all the dnodes in a TDengine cluster, below parameters must be configured as | 9 | maxVgroupsPerDb | Maximum number of vgroups that can be used by each DB | :::note -Prior to version 2.0.19.0, besides the above parameters, `locale` and `charset` must be configured as same too for each dnode. +Prior to version 2.0.19.0, besides the above parameters, `locale` and `charset` must also be configured the same for each dnode. ::: ## Start Cluster +In the following example we assume that the first dnode has FQDN h1.taosdata.com and the second dnode has FQDN h2.taosdata.com. + ### Start The First DNODE -The first dnode can be started following the instructions in [Get Started](/get-started/), for example h1.taosdata.com. Then TDengine CLI `taos` can be launched to execute command `show dnodes`, the output is as following for example: +The first dnode can be started following the instructions in [Get Started](/get-started/). Then TDengine CLI `taos` can be launched to execute the command `show dnodes`. Example output is shown below: ``` Welcome to the TDengine shell from Linux, Client Version:2.0.0.0 @@ -80,27 +88,41 @@ Copyright (c) 2017 by TAOS Data, Inc. All rights reserved. taos> show dnodes; id | end_point | vnodes | cores | status | role | create_time | ===================================================================================== - 1 | h1.taos.com:6030 | 0 | 2 | ready | any | 2020-07-31 03:49:29.202 | + 1 | h1.taosdata.com:6030 | 0 | 2 | ready | any | 2020-07-31 03:49:29.202 | Query OK, 1 row(s) in set (0.006385s) taos> ``` -From the above output, it is shown that the end point of the started dnode is "h1.taos.com:6030", which is the `firstEp` of the cluster. +From the above output, it is shown that the end point of the started dnode is "h1.taosdata.com:6030", which is the `firstEp` of the cluster. ### Start Other DNODEs There are a few steps necessary to add other dnodes in the cluster. -Firstly, start `taosd` as instructed in [Get Started](/get-started/), assuming it's for the second dnode. Before starting `taosd`, please making sure the configuration is correct, especially `firstEp`, `FQDN` and `serverPort`, `firstEp` must be same as the dnode shown in the section "Start First DNODE", i.e. "h1.taosdata.com" in this example. +Let's assume we are starting the second dnode with FQDN h2.taosdata.com. First we make sure the configuration is correct. + +```c +// firstEp is the end point to connect to when any dnode starts +firstEp h1.taosdata.com:6030 + +// must be configured to the FQDN of the host where the dnode is launched +fqdn h2.taosdata.com + +// the port used by the dnode, default is 6030 +serverPort 6030 + +``` + +Second, we can start `taosd` as instructed in [Get Started](/get-started/). -Then, on the first dnode, use TDengine CLI `taos` to execute below command to add the end point of the dnode in the cluster. In the command "fqdn:port" should be quoted using double quotes. +Then, on the first dnode i.e.
h1.taosdata.com in our example, use TDengine CLI `taos` to execute the following command to add the end point of the dnode in the cluster. In the command, "fqdn:port" should be quoted using double quotes. ```sql -CREATE DNODE "h2.taos.com:6030"; +CREATE DNODE "h2.taosdata.com:6030"; ``` -Then on the first dnode, execute `show dnodes` in `taos` to show whether the second dnode has been added in the cluster successfully or not. +Then on the first dnode h1.taosdata.com, execute `show dnodes` in `taos` to show whether the second dnode has been added in the cluster successfully or not. ```sql SHOW DNODES; @@ -109,6 +131,6 @@ SHOW DNODES; If the status of the newly added dnode is offline, please check: - Whether the `taosd` process is running properly or not -- In the log file `taosdlog.0` to see whether the fqdn and port are correct or not +- Whether the fqdn and port in the log file `taosdlog.0` are correct The above process can be repeated to add more dnodes in the cluster. diff --git a/docs-en/10-cluster/02-cluster-mgmt.md b/docs-en/10-cluster/02-cluster-mgmt.md index 3fcd68b29ce08519af9a0cde11d5361c6b4cd312..674c92e2766a4eb304079140af19c8efea72d55e 100644 --- a/docs-en/10-cluster/02-cluster-mgmt.md +++ b/docs-en/10-cluster/02-cluster-mgmt.md @@ -3,16 +3,16 @@ sidebar_label: Operation title: Manage DNODEs --- -It has been introduced that how to deploy and start a cluster from scratch. Once a cluster is ready, the dnode status in the cluster can be shown at any time, new dnode can be added to scale out the cluster, an existing dnode can be removed, even load balance can be performed manually.\ +The previous section, [Deployment](/cluster/deploy), showed you how to deploy and start a cluster from scratch. Once a cluster is ready, the status of dnode(s) in the cluster can be shown at any time. Dnodes can be managed from the TDengine CLI. New dnode(s) can be added to scale out the cluster, an existing dnode can be removed, and you can even perform load balancing manually, if necessary. :::note -All the commands to be introduced in this chapter need to be run through TDengine CLI, sometimes it's necessary to use root privilege. +All the commands introduced in this chapter must be run in the TDengine CLI - `taos`. Note that sometimes it is necessary to use root privilege. ::: ## Show DNODEs -below command can be executed in TDengine CLI `taos` to list all dnodes in the cluster, including ID, end point (fqdn:port), status (ready, offline), number of vnodes, number of free vnodes, etc. It's suggested to execute this command to check after adding or removing a dnode. +The below command can be executed in TDengine CLI `taos` to list all dnodes in the cluster, including ID, end point (fqdn:port), status (ready, offline), number of vnodes, number of free vnodes and so on. We recommend executing this command after adding or removing a dnode. ```sql SHOW DNODES; @@ -30,7 +30,7 @@ Query OK, 1 row(s) in set (0.008298s) ## Show VGROUPs -To utilize system resources efficiently and provide scalability, data sharding is required. The data of each database is divided into multiple shards and stored in multiple vnodes. These vnodes may be located in different dnodes, scaling out can be achieved by adding more vnodes from more dnodes. Each vnode can only be used for a single DB, but one DB can have multiple vnodes. The allocation of vnode is scheduled automatically by mnode according to system resources of the dnodes. +To utilize system resources efficiently and provide scalability, data sharding is required.
The data of each database is divided into multiple shards and stored in multiple vnodes. These vnodes may be located on different dnodes. One way of scaling out is to add more dnodes and thus more vnodes. Each vnode can only be used for a single DB, but one DB can have multiple vnodes. The allocation of vnodes is scheduled automatically by mnode based on the system resources of the dnodes. Launch TDengine CLI `taos` and execute below command: @@ -39,7 +39,7 @@ USE SOME_DATABASE; SHOW VGROUPS; ``` -The example output is as below: +The example output is below: ``` taos> show dnodes; @@ -87,7 +87,7 @@ taos> show dnodes; Query OK, 2 row(s) in set (0.001017s) ``` -It can be seen that the status of the new dnode is "offline", once the dnode is started and connects the firstEp of the cluster, execute the command again and get below example output, from which it can be seen that two dnodes are both in "ready" status. +It can be seen that the status of the new dnode is "offline". Once the dnode is started and connects to the firstEp of the cluster, you can execute the command again and get the example output below. As can be seen, both dnodes are in "ready" status. ``` taos> show dnodes; @@ -100,7 +100,7 @@ Query OK, 2 row(s) in set (0.001316s) ## Drop DNODE -Launch TDengine CLI `taos` and execute the command below to drop or remove a dnode from the cluster. In the command, `dnodeId` can be gotten from `show dnodes`. +Launch TDengine CLI `taos` and execute the command below to drop or remove a dnode from the cluster. In the command, you can get `dnodeId` from `show dnodes`. ```sql DROP DNODE "fqdn:port"; @@ -112,7 +112,7 @@ or DROP DNODE dnodeId; ``` -The example output is as below: +The example output is below: ``` taos> show dnodes; @@ -132,14 +132,14 @@ taos> show dnodes; Query OK, 1 row(s) in set (0.001137s) ``` -In the above example, when `show dnodes` is executed the first time, two dnodes are shown. Then `drop dnode 2` is executed, after that from the output of executing `show dnodes` again it can be seen that only the dnode with ID 1 is still in the cluster. +In the above example, when `show dnodes` is executed the first time, two dnodes are shown. After `drop dnode 2` is executed, you can execute `show dnodes` again and it can be seen that only the dnode with ID 1 is still in the cluster. :::note -- Once a dnode is dropped, it can't rejoin the cluster. To rejoin, the dnode needs to deployed again after cleaning up the data directory. Normally, before dropping a dnode, the data belonging to the dnode needs to be migrated to other place. -- Please be noted that `drop dnode` is different from stopping `taosd` process. `drop dnode` just removes the dnode out of TDengine cluster. Only after a dnode is dropped, can the corresponding `taosd` process be stopped. +- Once a dnode is dropped, it can't rejoin the cluster. To rejoin, the dnode needs to be deployed again after cleaning up the data directory. Before dropping a dnode, the data belonging to the dnode MUST be migrated/backed up according to your data retention, data security or other SOPs. +- Please note that `drop dnode` is different from stopping the `taosd` process. `drop dnode` just removes the dnode from the TDengine cluster. Only after a dnode is dropped, can the corresponding `taosd` process be stopped. - Once a dnode is dropped, other dnodes in the cluster will be notified of the drop and will not accept the request from the dropped dnode. -- dnodeID is allocated automatically and can't be interfered manually.
dnodeID is generated in ascending order without duplication. +- dnodeID is allocated automatically and can't be manually modified. dnodeID is generated in ascending order without duplication. ::: @@ -155,7 +155,7 @@ ALTER DNODE BALANCE "VNODE:-DNODE:"; In the above command, `source-dnodeId` is the original dnodeId where the vnode resides, `dest-dnodeId` specifies the target dnode. vgId (vgroup ID) can be shown by `SHOW VGROUPS `. -Firstly `show vgroups` is executed to show the vgroup distribution. +First `show vgroups` is executed to show the vgroup distribution. ``` taos> show vgroups; @@ -172,7 +172,7 @@ taos> show vgroups; Query OK, 8 row(s) in set (0.001314s) ``` -It can be seen that there are 5 vgroups in dnode 3 and 3 vgroups in node 1, now we want to move vgId 18 from dnode 3 to dnode 1. Execute below command in `taos` +It can be seen that there are 5 vgroups in dnode 3 and 3 vgroups in dnode 1. Now we want to move vgId 18 from dnode 3 to dnode 1. Execute the below command in `taos`: ``` taos> alter dnode 3 balance "vnode:18-dnode:1"; @@ -207,7 +207,7 @@ It can be seen from above output that vgId 18 has been moved from dnode 3 to dno :::note - Manual load balancing can only be performed when the automatic load balancing is disabled, i.e. `balance` is set to 0. -- Only vnode in normal state, i.e. master or slave, can be moved. vnode can't moved when its in status offline, unsynced or syncing. +- Only a vnode in normal state, i.e. master or slave, can be moved. A vnode can't be moved when it is in offline, unsynced or syncing status. - Before moving a vnode, it's necessary to make sure the target dnode has enough resources: CPU, memory and disk. ::: diff --git a/docs-en/10-cluster/03-ha-and-lb.md b/docs-en/10-cluster/03-ha-and-lb.md index 53c95be9e995a728b2b4053e4f204df58271716e..bd718eef9f8dc181628132de831dbca2af59d158 100644 --- a/docs-en/10-cluster/03-ha-and-lb.md +++ b/docs-en/10-cluster/03-ha-and-lb.md @@ -7,44 +7,45 @@ title: High Availability and Load Balancing High availability of vnode and mnode can be achieved through replicas in TDengine. -The number of vnodes is associated with each DB, there can be multiple DBs in a TDengine cluster. For the purpose of operation, different number of replicas can be configured properly for each DB. When creating a database, the parameter `replica` is used to specify the number of replicas, the default value is 1. With single replica, the high availability of the system can't be guaranteed. Whenever one node is down, data service would be unavailable. The number of dnodes in the cluster must NOT be lower than the number of replicas set for any DB, otherwise the `create table` operation would fail with error "more dnodes are needed". Below SQL statement is used to create a database named as "demo" with 3 replicas. +A TDengine cluster can have multiple databases. Each database has a number of vnodes associated with it. A different number of replicas can be configured for each DB. When creating a database, the parameter `replica` is used to specify the number of replicas. The default value for `replica` is 1. Naturally, a single replica cannot guarantee high availability since if one node is down, the data service is unavailable. Note that the number of dnodes in the cluster must NOT be lower than the number of replicas set for any DB, otherwise the `create table` operation will fail with error "more dnodes are needed". The SQL statement below is used to create a database named "demo" with 3 replicas.
```sql CREATE DATABASE demo replica 3; ``` -The data in a DB is divided into multiple shards and stored in multiple vgroups. The number of vnodes in each group is determined by the number of replicas set for the DB. The vnodes in each vgroups store exactly same data. For the purpose of high availability, the vnodes in a vgroup must be located in different dnodes on different hosts. As long as over half of the vnodes in a vgroup are in online state, the vgroup is able to serve data access. Otherwise the vgroup can't handle any data access for reading or inserting data. +The data in a DB is divided into multiple shards and stored in multiple vgroups. The number of vnodes in each vgroup is determined by the number of replicas set for the DB. The vnodes in each vgroup store exactly the same data. For the purpose of high availability, the vnodes in a vgroup must be located in different dnodes on different hosts. As long as over half of the vnodes in a vgroup are in an online state, the vgroup is able to provide data access. Otherwise the vgroup can't provide data access for reading or inserting data. -There may be data for multiple DBs in a dnode. Once a dnode is down, multiple DBs may be affected. However, it's hard to say the cluster is guaranteed to work properly as long as over half of dnodes are online because vnodes are introduced and there may be complex mapping between vnodes and dnodes. +There may be data for multiple DBs in a dnode. When a dnode is down, multiple DBs may be affected. In theory, the cluster should continue to provide data access for reading or inserting data as long as over half of the vnodes in each vgroup are online, but because of the possibly complex mapping between vnodes and dnodes, it is difficult to guarantee that the cluster will work properly just because over half of the dnodes are online. ## High Availability of Mnode -Each TDengine cluster is managed by `mnode`, which is a module of `taosd`. For the high availability of mnode, multiple mnodes can be configured using system parameter `numOfMNodes`, the valid time range is [1,3]. To make sure the data consistency between mnodes, the data replication between mnodes is performed in synchronous way. +Each TDengine cluster is managed by `mnode`, which is a module of `taosd`. For the high availability of mnode, multiple mnodes can be configured using system parameter `numOfMNodes`. The valid range for `numOfMNodes` is [1,3]. To ensure data consistency between mnodes, data replication between mnodes is performed synchronously. -There may be multiple dnodes in a cluster, but only one mnode can be started in each dnode. Which one or ones of the dnodes will be designated as mnodes is automatically determined by TDengine according to the cluster configuration and system resources. Command `show mnodes` can be executed in TDengine `taos` to show the mnodes in the cluster. +There may be multiple dnodes in a cluster, but only one mnode can be started in each dnode. Which one or ones of the dnodes will be designated as mnodes is automatically determined by TDengine according to the cluster configuration and system resources. The command `show mnodes` can be executed in TDengine `taos` to show the mnodes in the cluster. ```sql SHOW MNODES; ``` -The end point and role/status (master, slave, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode, because there must be at least one mnode otherwise the cluster doesn't work.
If `numOfMNodes` is configured to 2, another mnode will be started when the second dnode is launched. +The end point and role/status (master, slave, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode. Without at least one mnode, the cluster cannot work. If `numOfMNodes` is configured to 2, another mnode will be started when the second dnode is launched. For the high availability of mnode, `numOfMNodes` needs to be configured to 2 or a higher value. Because the data consistency between mnodes must be guaranteed, the replica confirmation parameter `quorum` is set to 2 automatically if `numOfMNodes` is set to 2 or higher. :::note -If high availability is important for your system, both vnode and mnode must be configured to have multiple replicas. How to configure for them are different and have been described. +If high availability is important for your system, both vnode and mnode must be configured to have multiple replicas. ::: -## Load Balance +## Load Balancing -Load balance will be triggered in 3 cades without manual intervention. +Load balancing will be triggered in 3 cases without manual intervention. -- When a new dnode is joined in the cluster, automatic load balancing may be triggered, some data from some dnodes may be transferred to the new dnode automatically. +- When a new dnode joins the cluster, automatic load balancing may be triggered. Some data from other dnodes may be transferred to the new dnode automatically. - When a dnode is removed from the cluster, the data from this dnode will be transferred to other dnodes automatically. - When a dnode is too hot, i.e. too much data has been stored in it, automatic load balancing may be triggered to migrate some vnodes from this dnode to other dnodes. -- :::tip - Automatic load balancing is controlled by parameter `balance`, 0 means disabled and 1 means enabled. + +:::tip +Automatic load balancing is controlled by the parameter `balance`: 0 means disabled and 1 means enabled. This is set in the file [taos.cfg](https://docs.tdengine.com/reference/config/#balance). ::: @@ -52,26 +53,26 @@ Load balance will be triggered in 3 cades without manual intervention. When a dnode is offline, it can be detected by the TDengine cluster. There are two cases: -- The dnode becomes online again before the threshold configured in `offlineThreshold` is reached, it is still in the cluster and data replication is started automatically. The dnode can work properly after the data syncup is finished. +- The dnode comes online before the threshold configured in `offlineThreshold` is reached. The dnode is still in the cluster and data replication is started automatically. The dnode can work properly after the data sync is finished. -- If the dnode has been offline over the threshold configured in `offlineThreshold` in `taos.cfg`, the dnode will be removed from the cluster automatically. System alert will be generated and automatic load balancing will be triggered too if `balance` is set to 1. When the removed dnode is restarted and becomes online, it will not be joined in the cluster automatically, it can only be joined manually by the system operator. +- If the dnode has been offline over the threshold configured in `offlineThreshold` in `taos.cfg`, the dnode will be removed from the cluster automatically. A system alert will be generated and automatic load balancing will be triggered if `balance` is set to 1.
When the removed dnode is restarted and becomes online, it will not join the cluster automatically. The system administrator has to manually join the dnode to the cluster. :::note -If all the vnodes in a vgroup (or mnodes in mnode group) are in offline or unsynced status, the master node can only be voted after all the vnodes or mnodes in the group become online and can exchange status, then the vgroup (or mnode group) is able to provide service. +If all the vnodes in a vgroup (or mnodes in mnode group) are in offline or unsynced status, the master node can only be elected after all the vnodes or mnodes in the group become online and can exchange status. Following this, the vgroup (or mnode group) is able to provide service. ::: ## Arbitrator -If the number of replicas is set to an even number like 2, when half of the vnodes in a vgroup don't work master node can't be voted. Similar case is also applicable to mnode if the number of mnodes is set to an even number like 2. +The "arbitrator" component is used to address the special case when the number of replicas is set to an even number like 2 or 4. If half of the vnodes in a vgroup don't work, it is impossible to vote and select a master node. This situation also applies to mnodes if the number of mnodes is set to an even number like 2 or 4. -To resolve this problem, a new arbitrator component named `tarbitrator`, abbreviated for TDengine Arbitrator, was introduced. Arbitrator simulates a vnode or mnode but it's only responsible for network communication and doesn't handle any actual data access. With Arbitrator, any vgroup or mnode group can be considered as having number of member nodes and master node can be selected. +To resolve this problem, a new arbitrator component named `tarbitrator`, an abbreviation of TDengine Arbitrator, was introduced. The `tarbitrator` simulates a vnode or mnode but it's only responsible for network communication and doesn't handle any actual data access. As long as more than half of the vnodes or mnodes, including the Arbitrator, are available, the vnode group or mnode group can provide data insertion or query services normally. -Normally, it's suggested to configure replica number of each DB or system parameter `numOfMNodes` to an odd number. However, if a user is very sensitive to storage space, replica number of 2 plus arbitrator component can be used to achieve both lower cost of storage space and high availability. +Normally, it's prudent to configure the replica number for each DB or system parameter `numOfMNodes` to be an odd number. However, if a user is very sensitive to storage space, a replica number of 2 plus arbitrator component can be used to achieve both lower cost of storage space and high availability. Arbitrator component is installed with the server package. For details about how to install, please refer to [Install](/operation/pkg-install). The `-p` parameter of `tarbitrator` can be used to specify the port on which it provides service. -In the configuration file `taos.cfg` of each dnode, parameter `arbitrator` needs to be configured to the end point of the `tarbitrator` process. arbitrator component will be used automatically if the replica is configured to an even number and will be ignored if the replica is configured to an odd number. +In the configuration file `taos.cfg` of each dnode, parameter `arbitrator` needs to be configured to the end point of the `tarbitrator` process.
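As an illustration only, the relevant line in each dnode's `taos.cfg` might look like the minimal sketch below; the host name `arb-host` is hypothetical, and the port assumes `tarbitrator` was started on its default port 6042:

```
# end point of the tarbitrator process (FQDN:port); the host name here is hypothetical
arbitrator   arb-host:6042
```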
The Arbitrator component will be used automatically if the replica is configured to an even number and will be ignored if the replica is configured to an odd number. The Arbitrator can be shown by executing a command in TDengine CLI `taos`; its role is shown as "arb". diff --git a/docs-en/10-cluster/index.md b/docs-en/10-cluster/index.md index a19a54e01d5a6429e95958c2544072961b0cb66a..5a45a2ce7b08c67322265cf1bbd54ef66cbfc027 100644 --- a/docs-en/10-cluster/index.md +++ b/docs-en/10-cluster/index.md @@ -3,7 +3,7 @@ title: Cluster keywords: ["cluster", "high availability", "load balance", "scale out"] --- -TDengine has a native distributed design and provides the ability to scale out. A few of nodes can form a TDengine cluster. If you need to get higher processing power, you just need to add more nodes into the cluster. TDengine uses virtual node technology to virtualize a node into multiple virtual nodes to achieve load balancing. At the same time, TDengine can group virtual nodes on different nodes into virtual node groups, and use the replication mechanism to ensure the high availability of the system. The cluster feature of TDengine is completely open source. +TDengine has a native distributed design and provides the ability to scale out. A few nodes can form a TDengine cluster. If you need higher processing power, you just need to add more nodes into the cluster. TDengine uses virtual node technology to virtualize a node into multiple virtual nodes to achieve load balancing. At the same time, TDengine can group virtual nodes on different nodes into virtual node groups, and use the replication mechanism to ensure the high availability of the system. The cluster feature of TDengine is completely open source. This chapter mainly introduces cluster deployment, maintenance, and how to achieve high availability and load balancing. diff --git a/docs-en/12-taos-sql/01-data-type.md b/docs-en/12-taos-sql/01-data-type.md index 931e3bbac7f0601a9de79d0dfa04ffc94ecced96..3f5a49e3135771c6c1e62bcf158a99ee30f1ed9d 100644 --- a/docs-en/12-taos-sql/01-data-type.md +++ b/docs-en/12-taos-sql/01-data-type.md @@ -1,23 +1,23 @@ --- title: Data Types -description: "The data types supported by TDengine include timestamp, float, JSON, etc" +description: "TDengine supports a variety of data types including timestamp, float, JSON and many others." --- -When using TDengine to store and query data, the most important part of the data is timestamp. Timestamp must be specified when creating and inserting data rows or querying data, timestamp must follow below rules: +When using TDengine to store and query data, the most important part of the data is the timestamp. Timestamp must be specified when creating and inserting data rows.
Timestamp must follow the rules below: -- the format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128` -- internal function `now` can be used to get the current timestamp of the client side -- the current timestamp of the client side is applied when `now` is used to insert data +- The format must be `YYYY-MM-DD HH:mm:ss.MS`; the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128` +- Internal function `now` can be used to get the current timestamp on the client side +- The current timestamp of the client side is applied when `now` is used to insert data - Epoch Time:timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from 1970-01-01 00:00:00.000 (UTC/GMT) -- timestamp can be applied with add/subtract operation, for example `now-2h` means 2 hours back from the time at which query is executed,the unit can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), w(week.。 So `select * from t1 where ts > now-2w and ts <= now-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operation. +- Add/subtract operations can be carried out on timestamps. For example, `now-2h` means 2 hours prior to the time at which the query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `select * from t1 where ts > now-2w and ts <= now-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations. -Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`, like below, the default time precision is millisecond. +Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanoseconds. ```sql CREATE DATABASE db_name PRECISION 'ns'; ``` -In TDengine, below data types can be used when specifying a column or tag. +In TDengine, the data types below can be used when specifying a column or tag. | # | **type** | **Bytes** | **Description** | | --- | :-------: | --------- | ------------------------- | @@ -25,13 +25,13 @@ In TDengine, below data types can be used when specifying a column or tag. | 2 | INT | 4 | Integer, the value range is [-2^31+1, 2^31-1], while -2^31 is treated as NULL | | 3 | BIGINT | 8 | Long integer, the value range is [-2^63+1, 2^63-1], while -2^63 is treated as NULL | | 4 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38] | -| 5 | DOUBLE | 8 | double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308] | +| 5 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308] | | 6 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. The string length can be up to 16374 bytes. The string value must be quoted with single quotes.
The literal single quote inside the string must be preceded with back slash like `\'` | | 7 | SMALLINT | 2 | Short integer, the value range is [-32767, 32767], while -32768 is treated as NULL | | 8 | TINYINT | 1 | Single-byte integer, the value range is [-127, 127], while -128 is treated as NULL | | 9 | BOOL | 1 | Bool, the value range is {true, false} | -| 10 | NCHAR | User Defined| Multiple-Byte string that can include like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\’`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. Error will be reported the string value exceeds the length defined. | -| 11 | JSON | | json type can only be used on tag, a tag of json type is excluded with any other tags of any other type | +| 10 | NCHAR | User Defined| Multi-Byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\’`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. | +| 11 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type | :::tip TDengine is case insensitive and treats any characters in the sql command as lower case by default, case sensitive strings must be quoted with single quotes. @@ -39,7 +39,7 @@ TDengine is case insensitive and treats any characters in the sql command as low ::: :::note -Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multiple-byte characters must be stored in NCHAR type. +Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type. ::: diff --git a/docs-en/12-taos-sql/02-database.md b/docs-en/12-taos-sql/02-database.md index 12e2edf8bae21059e8c2d5c18858d502c834e9c1..80581b2f1bc7ce9cd046c18873d3f22b6804d8cf 100644 --- a/docs-en/12-taos-sql/02-database.md +++ b/docs-en/12-taos-sql/02-database.md @@ -4,7 +4,7 @@ title: Database description: "create and drop database, show or change database parameters" --- -## Create Datable +## Create Database ``` CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1]; @@ -12,11 +12,11 @@ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1]; :::info -1. KEEP specifies the number of days for which the data in the database to be created will be kept, the default value is 3650 days, i.e. 10 years. The data will be deleted automatically once its age exceeds this threshold. +1. KEEP specifies the number of days for which the data in the database will be retained. The default value is 3650 days, i.e. 10 years. The data will be deleted automatically once its age exceeds this threshold. 2. UPDATE specifies whether the data can be updated and how the data can be updated. - 1. UPDATE set to 0 means update operation is not allowed, the data with an existing timestamp will be dropped silently. - 2. 
UPDATE set to 1 means the whole row will be updated, the columns for which no value is specified will be set to NULL - 3. UPDATE set to 2 means updating a part of columns for a row is allowed, the columns for which no value is specified will be kept as no change + 1. UPDATE set to 0 means update operation is not allowed. The update for data with an existing timestamp will be discarded silently and the original record in the database will be preserved as is. + 2. UPDATE set to 1 means the whole row will be updated. The columns for which no value is specified will be set to NULL. + 3. UPDATE set to 2 means updating a subset of columns for a row is allowed. The columns for which no value is specified will be kept unchanged. 3. The maximum length of database name is 33 bytes. 4. The maximum length of a SQL statement is 65,480 bytes. 5. Below are the parameters that can be used when creating a database @@ -34,8 +34,8 @@ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1]; - quorum: [Description](/reference/config/#quorum) - maxVgroupsPerDb: [Description](/reference/config/#maxvgroupsperdb) - comp: [Description](/reference/config/#comp) - - precision: [Description](reference/config/#precision) -6. Please be noted that all of the parameters mentioned in this section can be configured in configuration file `taosd.cfg` at server side and used by default, can be override if they are specified in `create database` statement. + - precision: [Description](/reference/config/#precision) +6. Please note that all of the parameters mentioned in this section are configured in configuration file `taos.cfg` on the TDengine server. If not specified in the `create database` statement, the values from taos.cfg are used by default. To override default parameters, they must be specified in the `create database` statement. ::: @@ -52,7 +52,7 @@ USE db_name; ``` :::note -This way is not applicable when using a REST connection +This way is not applicable when using a REST connection. In a REST connection the database name must be specified before a table or stable name. For example, to query the stable "meters" in database "test", the query would be "SELECT count(*) from test.meters". ::: @@ -63,13 +63,13 @@ DROP DATABASE [IF EXISTS] db_name; ``` :::note -All data in the database will be deleted too. This command must be used with caution. +All data in the database will be deleted too. This command must be used with extreme caution. Please follow your organization's data integrity, data backup, data security or any other applicable SOPs before using this command. ::: ## Change Database Configuration -Some examples are shown below to demonstrate how to change the configuration of a database. Please be noted that some configuration parameters can be changed after the database is created, but some others can't, for details of the configuration parameters of database please refer to [Configuration Parameters](/reference/config/). +Some examples are shown below to demonstrate how to change the configuration of a database. Please note that some configuration parameters can be changed after the database is created, but some cannot. For details of the configuration parameters of database please refer to [Configuration Parameters](/reference/config/). ``` ALTER DATABASE db_name COMP 2; @@ -81,7 +81,7 @@ COMP parameter specifies whether the data is compressed and how the data is comp ALTER DATABASE db_name REPLICA 2; ``` -REPLICA parameter specifies the number of replications of the database.
+REPLICA parameter specifies the number of replicas of the database. ``` ALTER DATABASE db_name KEEP 365; @@ -124,4 +124,4 @@ SHOW DATABASES; SHOW CREATE DATABASE db_name; ``` -This command is useful when migrating the data from one TDengine cluster to another one. Firstly this command can be used to get the CREATE statement, which in turn can be used in another TDengine to create an exactly same database. +This command is useful when migrating the data from one TDengine cluster to another. This command can be used to get the CREATE statement, which can be used in another TDengine instance to create the exact same database. diff --git a/docs-en/12-taos-sql/03-table.md b/docs-en/12-taos-sql/03-table.md index a1524f45f98e8435425a9a937b7f6dc4431b6e06..f065a8e2396583bb7a512446b513ed60056ad55e 100644 --- a/docs-en/12-taos-sql/03-table.md +++ b/docs-en/12-taos-sql/03-table.md @@ -12,12 +12,12 @@ CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_nam :::info -1. The first column of a table must be in TIMESTAMP type, and it will be set as primary key automatically -2. The maximum length of table name is 192 bytes. -3. The maximum length of each row is 16k bytes, please be notes that the extra 2 bytes used by each BINARY/NCHAR column are also counted in. -4. The name of sub-table can only be consisted of English characters, digits and underscore, and can't be started with digit. Table names are case insensitive. -5. The maximum length in bytes must be specified when using BINARY or NCHAR type. -6. Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for name length is still valid. The table names specified using escape character are case sensitive. Only ASCII visible characters can be used with escape character. +1. The first column of a table MUST be of type TIMESTAMP. It is automatically set as the primary key. +2. The maximum length of the table name is 192 bytes. +3. The maximum length of each row is 48k bytes, please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted. +4. The name of the subtable can only consist of characters from the English alphabet, digits and underscore. Table names can't start with a digit. Table names are case insensitive. +5. The maximum length in bytes must be specified when using BINARY or NCHAR types. +6. Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for the name length is still valid. The table names specified using escape character are case sensitive. Only ASCII visible characters can be used with escape character. For example \`aBc\` and \`abc\` are different table names but `abc` and `aBc` are same table names because they are both converted to `abc` internally. ::: @@ -28,9 +28,9 @@ CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_nam CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name TAGS (tag_value1, ...); ``` -The above command creates a subtable using the specified super table as template and the specified tab values. +The above command creates a subtable using the specified super table as a template and the specified tag values. 
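For instance, a minimal sketch using the `meters` super table that serves as the example schema elsewhere in these docs; the subtable name and tag values are illustrative only:

```sql
-- Create subtable d1001 from the meters template, with location and groupId tag values
CREATE TABLE IF NOT EXISTS d1001 USING meters TAGS ('California.SanFrancisco', 2);
```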
-### Create Subtable Using STable As Template With A Part of Tags +### Create Subtable Using STable As Template With A Subset of Tags ``` CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name (tag_name1, ...) TAGS (tag_value1, ...); @@ -44,11 +44,11 @@ The tags for which no value is specified will be set to NULL. CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...; ``` -This way can be used to create a lot of tables in a single SQL statement to accelerate the speed of the creating tables. +This can be used to create a lot of tables in a single SQL statement while making table creation much faster. :::info -- Creating tables in batch must use super table as template. +- Creating tables in batch must use a super table as a template. - The length of single statement is suggested to be between 1,000 and 3,000 bytes for best performance. ::: @@ -71,7 +71,7 @@ SHOW TABLES [LIKE tb_name_wildcard]; SHOW CREATE TABLE tb_name; ``` -This way is useful when migrating the data in one TDengine cluster to another one because it can be used to create exactly same tables in the target database. +This is useful when migrating the data in one TDengine cluster to another one because it can be used to create the exact same tables in the target database. ## Show Table Definition @@ -90,7 +90,7 @@ ALTER TABLE tb_name ADD COLUMN field_name data_type; :::info 1. The maximum number of columns is 4096, the minimum number of columns is 2. -2. The maximum length of column name is 64 bytes. +2. The maximum length of a column name is 64 bytes. ::: @@ -101,7 +101,7 @@ ALTER TABLE tb_name DROP COLUMN field_name; ``` :::note -If a table is created using a super table as template, the table definition can only be changed on the corresponding super table, but the change will be automatically applied to all the sub tables created using this super table as template. For tables created in normal way, the table definition can be changed directly on the table. +If a table is created using a super table as template, the table definition can only be changed on the corresponding super table, and the change will be automatically applied to all the subtables created using this super table as template. For tables created in the normal way, the table definition can be changed directly on the table. ::: @@ -111,10 +111,10 @@ If a table is created using a super table as template, the table definition can ALTER TABLE tb_name MODIFY COLUMN field_name data_type(length); ``` -The the type of a column is variable length, like BINARY or NCHAR, this way can be used to change (or increase) the length of the column. +If the type of a column is variable length, like BINARY or NCHAR, this command can be used to change the length of the column. :::note -If a table is created using a super table as template, the table definition can only be changed on the corresponding super table, but the change will be automatically applied to all the sub tables created using this super table as template. For tables created in normal way, the table definition can be changed directly on the table. +If a table is created using a super table as template, the table definition can only be changed on the corresponding super table, and the change will be automatically applied to all the subtables created using this super table as template. For tables created in the normal way, the table definition can be changed directly on the table. 
::: diff --git a/docs-en/12-taos-sql/04-stable.md b/docs-en/12-taos-sql/04-stable.md index b7817f90287a6415bee020fb5adc8e6239cc6da4..b8a608792ab327a81129d29ddd0ff44d7af6e6c5 100644 --- a/docs-en/12-taos-sql/04-stable.md +++ b/docs-en/12-taos-sql/04-stable.md @@ -9,20 +9,20 @@ Keyword `STable`, abbreviated for super table, is supported since version 2.0.15 ::: -## Crate STable +## Create STable ``` CREATE STable [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]); ``` -The SQL statement of creating STable is similar to that of creating table, but a special column named as `TAGS` must be specified with the names and types of the tags. +The SQL statement of creating a STable is similar to that of creating a table, but a special column set named `TAGS` must be specified with the names and types of the tags. :::info -1. The tag types specified in TAGS should NOT be timestamp. Since 2.1.3.0 timestamp type can be used in TAGS column, but its value must be fixed and arithmetic operation can't be applied on it. -2. The tag names specified in TAGS should NOT be same as other columns. -3. The tag names specified in TAGS should NOT be same as any reserved keywords.(Please refer to [keywords](/taos-sql/keywords/) -4. The maximum number of tags specified in TAGS is 128, but there must be at least one tag, and the total length of all tag columns should NOT exceed 16KB. +1. A tag can be of type timestamp, since version 2.1.3.0, but its value must be fixed and arithmetic operations cannot be performed on it. Prior to version 2.1.3.0, tag types specified in TAGS could not be of type timestamp. +2. The tag names specified in TAGS should NOT be the same as other columns. +3. The tag names specified in TAGS should NOT be the same as any reserved keywords.(Please refer to [keywords](/taos-sql/keywords/) +4. The maximum number of tags specified in TAGS is 128, there must be at least one tag, and the total length of all tag columns should NOT exceed 16KB. ::: @@ -32,7 +32,7 @@ The SQL statement of creating STable is similar to that of creating table, but a DROP STable [IF EXISTS] stb_name; ``` -All the sub-tables created using the deleted STable will be deleted automatically. +All the subtables created using the deleted STable will be deleted automatically. ## Show All STables @@ -40,7 +40,7 @@ All the sub-tables created using the deleted STable will be deleted automaticall SHOW STableS [LIKE tb_name_wildcard]; ``` -This command can be used to display the information of all STables in the current database, including name, creation time, number of columns, number of tags, number of tables created using this STable. +This command can be used to display the information of all STables in the current database, including name, creation time, number of columns, number of tags, and number of tables created using this STable. ## Show The Create Statement of A STable @@ -48,7 +48,7 @@ This command can be used to display the information of all STables in the curren SHOW CREATE STable stb_name; ``` -This command is useful in migrating data from one TDengine cluster to another one because it can be used to create an exactly same STable in the target database. +This command is useful in migrating data from one TDengine cluster to another because it can be used to create the exact same STable in the target database. 
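As a hedged sketch of that migration flow (the `meters` STable is just the example schema used elsewhere in these docs, and the exact text returned by the command may vary by version):

```sql
-- On the source cluster: obtain the creation statement of the STable
SHOW CREATE STable meters;
-- On the target cluster: execute the returned statement, for example:
CREATE STable meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(30), groupId INT);
```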
## Get STable Definition @@ -76,7 +76,7 @@ ALTER STable stb_name DROP COLUMN field_name; ALTER STable stb_name MODIFY COLUMN field_name data_type(length); ``` -This command can be used to change (or increase, more specifically) the length of a column of variable length types, like BINARY or NCHAR. +This command can be used to change (or more specifically, increase) the length of a column of variable length types, like BINARY or NCHAR. ## Change Tags of A STable @@ -94,7 +94,7 @@ This command is used to add a new tag for a STable and specify the tag type. ALTER STable stb_name DROP TAG tag_name; ``` -The tag will be removed automatically from all the sub tables crated using the super table as template once a tag is removed from a super table. +The tag will be removed automatically from all the subtables, created using the super table as template, once a tag is removed from a super table. ### Change A Tag @@ -102,7 +102,7 @@ The tag will be removed automatically from all the sub tables crated using the s ALTER STable stb_name CHANGE TAG old_tag_name new_tag_name; ``` -The tag name will be changed automatically from all the sub tables crated using the super table as template once a tag name is changed for a super table. +The tag name will be changed automatically for all the subtables, created using the super table as template, once a tag name is changed for a super table. ### Change Tag Length @@ -110,9 +110,9 @@ The tag name will be changed automatically from all the sub tables crated using ALTER STable stb_name MODIFY TAG tag_name data_type(length); ``` -This command can be used to change (or increase, more specifically) the length of a tag of variable length types, like BINARY or NCHAR. +This command can be used to change (or more specifically, increase) the length of a tag of variable length types, like BINARY or NCHAR. :::note -Changing tag value can be applied to only sub tables. All other tag operations, like add tag, remove tag, however, can be applied to only STable. If a new tag is added for a STable, the tag will be added with NULL value for all its sub tables. +Changing tag values can be applied to only subtables. All other tag operations, like add tag, remove tag, however, can only be applied to the STable. If a new tag is added for a STable, the tag will be added with NULL value for all its subtables. ::: diff --git a/docs-en/12-taos-sql/05-insert.md b/docs-en/12-taos-sql/05-insert.md index 96e6a08ee17e0c72b15a35efc487a78ae4673017..1336cd7238a19190583ea9d268a64df242ffd3c9 100644 --- a/docs-en/12-taos-sql/05-insert.md +++ b/docs-en/12-taos-sql/05-insert.md @@ -19,15 +19,15 @@ INSERT INTO ## Insert Single or Multiple Rows -Single row or multiple rows specified with VALUES can be inserted into a specific table. For example +Single row or multiple rows specified with VALUES can be inserted into a specific table. For example: -Single row is inserted using below statement. +A single row is inserted using the below statement. ```sql INSERT INTO d1001 VALUES (NOW, 10.2, 219, 0.32); ``` -Double rows can be inserted using below statement. +Two rows are inserted using the statement below. ```sql INSERT INTO d1001 VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32) (1626164208000, 10.15, 217, 0.33); @@ -36,7 +36,7 @@ INSERT INTO d1001 VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32) (162616420 :::note 1. In the second example above, different formats are used in the two rows to be inserted.
In the first row, the timestamp format is a date and time string, which is interpreted from the string value only. In the second row, the timestamp format is a long integer, which will be interpreted based on the database time precision. -2. When trying to insert multiple rows in single statement, only the timestamp of one row can be set as NOW, otherwise there will be duplicate timestamps among the rows and the result may be out of expectation because NOW will be interpreted as the time when the statement is executed. +2. When trying to insert multiple rows in a single statement, only the timestamp of one row can be set as NOW, otherwise there will be duplicate timestamps among the rows and the result may not be as expected because NOW will be interpreted as the time when the statement is executed. 3. The oldest timestamp that is allowed is subtracting the KEEP parameter from current time. 4. The newest timestamp that is allowed is adding the DAYS parameter to current time. @@ -51,13 +51,13 @@ INSERT INTO d1001 (ts, current, phase) VALUES ('2021-07-13 14:06:33.196', 10.27, ``` :::info -If no columns are explicitly specified, all the columns must be provided with values, this is called "all column mode". The insert performance of all column mode is much better than specifying a part of columns, so it's encouraged to use "all column mode" while providing NULL value explicitly for the columns for which no actual value can be provided. +If no columns are explicitly specified, all the columns must be provided with values; this is called "all column mode". The insert performance of all column mode is much better than specifying a subset of columns, so it's encouraged to use "all column mode" while providing NULL value explicitly for the columns for which no actual value can be provided. ::: ## Insert Into Multiple Tables -One or multiple rows can be inserted into multiple tables in single SQL statement, with or without specifying specific columns. +One or multiple rows can be inserted into multiple tables in a single SQL statement, with or without specifying specific columns. ```sql INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33) @@ -66,40 +66,40 @@ INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07- ## Automatically Create Table When Inserting -If it's not sure whether the table already exists, the table can be created automatically while inserting using below SQL statement. To use this functionality, a STable must be used as template and tag values must be provided. +If it's unknown whether the table already exists, the table can be created automatically while inserting using the SQL statement below. To use this functionality, a STable must be used as template and tag values must be provided. ```sql -INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32); +INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32); ``` -It's not necessary to provide values for all tag when creating tables automatically, the tags without values provided will be set to NULL. +It's not necessary to provide values for all tags when creating tables automatically; the tags without values provided will be set to NULL.
```sql INSERT INTO d21001 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:33.196', 10.15, 217, 0.33); ``` -Multiple rows can also be inserted into same table in single SQL statement using this way. +Multiple rows can also be inserted into the same table in a single SQL statement. ```sql -INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33) +INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33) d21002 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:34.255', 10.15, 217, 0.33) d21003 USING meters (groupId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31); ``` :::info -Prior to version 2.0.20.5, when using `INSERT` to create table automatically and specify the columns, the column names must follow the table name immediately. From version 2.0.20.5, the column names can follow the table name immediately, also can be put between `TAGS` and `VALUES`. In same SQL statement, however, these two ways of specifying column names can't be mixed. +Prior to version 2.0.20.5, when using `INSERT` to create tables automatically and specifying the columns, the column names must follow the table name immediately. From version 2.0.20.5, the column names can follow the table name immediately, also can be put between `TAGS` and `VALUES`. In the same SQL statement, however, these two ways of specifying column names can't be mixed. ::: ## Insert Rows From A File -Besides using `VALUES` to insert one or multiple rows, the data to be inserted can also be prepared in a CSV file with comma as separator and each field value quoted by single quotes. Table definition is not required in the CSV file. For example, if file "/tmp/csvfile.csv" contains below data: +Besides using `VALUES` to insert one or multiple rows, the data to be inserted can also be prepared in a CSV file with comma as separator and each field value quoted by single quotes. Table definition is not required in the CSV file. 
For example, if file "/tmp/csvfile.csv" contains the below data: ``` '2021-07-13 14:07:34.630', '10.2', '219', '0.32' '2021-07-13 14:07:35.779', '10.15', '217', '0.33' ``` -Then data in this file can be inserted by below SQL statement: +Then data in this file can be inserted by the SQL statement below: ```sql INSERT INTO d1001 FILE '/tmp/csvfile.csv'; @@ -107,30 +107,30 @@ INSERT INTO d1001 FILE '/tmp/csvfile.csv'; ## Create Tables Automatically and Insert Rows From File -From version 2.1.5.0, tables can be automatically created using a super table as template when inserting data from a CSV file, Like below: +From version 2.1.5.0, tables can be automatically created using a super table as template when inserting data from a CSV file, like below: ```sql -INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile.csv'; +INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile.csv'; ``` -Multiple tables can be automatically created and inserted in single SQL statement, like below: +Multiple tables can be automatically created and inserted in a single SQL statement, like below: ```sql -INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile_21001.csv' +INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv' d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv'; ``` ## More About Insert -For SQL statement like `insert`, stream parsing strategy is applied. That means before an error is found and the execution is aborted, the part prior to the error point has already been executed. Below is an experiment to help understand the behavior. +For SQL statement like `insert`, a stream parsing strategy is applied. That means before an error is found and the execution is aborted, the part prior to the error point has already been executed. Below is an experiment to help understand the behavior. -Firstly, a super table is created. +First, a super table is created. ```sql CREATE TABLE meters(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS(location BINARY(30), groupId INT); ``` -It can be proved that the super table has been created by `SHOW STableS`, but no table exists by `SHOW TABLES`. +It can be proven that the super table has been created by `SHOW STableS`, but no table exists using `SHOW TABLES`. ``` taos> SHOW STableS; @@ -146,7 +146,7 @@ Query OK, 0 row(s) in set (0.000946s) Then, try to create table d1001 automatically when inserting data into it. ```sql -INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a'); +INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('a'); ``` The output shows the value to be inserted is invalid. But `SHOW TABLES` proves that the table has been created automatically by the `INSERT` statement. @@ -161,4 +161,4 @@ taos> SHOW TABLES; Query OK, 1 row(s) in set (0.001091s) ``` -From the above experiment, we can see that even though the value to be inserted is invalid but the table is still created. +From the above experiment, we can see that while the value to be inserted is invalid the table is still created. diff --git a/docs-en/12-taos-sql/06-select.md b/docs-en/12-taos-sql/06-select.md index 11b181f65d4e7e0e7d47d04986b144ff362c879f..8a017cf92e40aa4a854dcd531b7df291a9243515 100644 --- a/docs-en/12-taos-sql/06-select.md +++ b/docs-en/12-taos-sql/06-select.md @@ -21,7 +21,7 @@ SELECT select_expr [, select_expr ...] ## Wildcard -Wilcard \* can be used to specify all columns. 
The result includes only data columns for normal tables. +Wildcard \* can be used to specify all columns. The result includes only data columns for normal tables. ``` taos> SELECT * FROM d1001; @@ -39,26 +39,26 @@ The result includes both data columns and tag columns for super table. taos> SELECT * FROM meters; ts | current | voltage | phase | location | groupid | ===================================================================================================================================== - 2018-10-03 14:38:05.500 | 11.80000 | 221 | 0.28000 | Beijing.Haidian | 2 | - 2018-10-03 14:38:16.600 | 13.40000 | 223 | 0.29000 | Beijing.Haidian | 2 | - 2018-10-03 14:38:05.000 | 10.80000 | 223 | 0.29000 | Beijing.Haidian | 3 | - 2018-10-03 14:38:06.500 | 11.50000 | 221 | 0.35000 | Beijing.Haidian | 3 | - 2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 | Beijing.Chaoyang | 3 | - 2018-10-03 14:38:16.650 | 10.30000 | 218 | 0.25000 | Beijing.Chaoyang | 3 | - 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | Beijing.Chaoyang | 2 | - 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | Beijing.Chaoyang | 2 | - 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | Beijing.Chaoyang | 2 | + 2018-10-03 14:38:05.500 | 11.80000 | 221 | 0.28000 | California.LosAngeles | 2 | + 2018-10-03 14:38:16.600 | 13.40000 | 223 | 0.29000 | California.LosAngeles | 2 | + 2018-10-03 14:38:05.000 | 10.80000 | 223 | 0.29000 | California.LosAngeles | 3 | + 2018-10-03 14:38:06.500 | 11.50000 | 221 | 0.35000 | California.LosAngeles | 3 | + 2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 | California.SanFrancisco | 3 | + 2018-10-03 14:38:16.650 | 10.30000 | 218 | 0.25000 | California.SanFrancisco | 3 | + 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | California.SanFrancisco | 2 | + 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | California.SanFrancisco | 2 | + 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | California.SanFrancisco | 2 | Query OK, 9 row(s) in set (0.002022s) ``` -Wildcard can be used with table name as prefix, both below SQL statements have same effects and return all columns. +Wildcard can be used with table name as prefix. Both SQL statements below have the same effect and return all columns. ```SQL SELECT * FROM d1001; SELECT d1001.* FROM d1001; ``` -In JOIN query, however, with or without table name prefix will return different results. \* without table prefix will return all the columns of both tables, but \* with table name as prefix will return only the columns of that table. +In a JOIN query, however, the results are different with or without a table name prefix. \* without table prefix will return all the columns of both tables, but \* with table name as prefix will return only the columns of that table. ``` taos> SELECT * FROM d1001, d1003 WHERE d1001.ts=d1003.ts; @@ -76,7 +76,7 @@ taos> SELECT d1001.* FROM d1001,d1003 WHERE d1001.ts = d1003.ts; Query OK, 1 row(s) in set (0.020443s) ``` -Wilcard \* can be used with some functions, but the result may be different depending on the function being used. For example, `count(*)` returns only one column, i.e. the number of rows; `first`, `last` and `last_row` return all columns of the selected row. +Wildcard \* can be used with some functions, but the result may be different depending on the function being used. For example, `count(*)` returns only one column, i.e. the number of rows; `first`, `last` and `last_row` return all columns of the selected row.
``` taos> SELECT COUNT(*) FROM d1001; @@ -96,20 +96,20 @@ Query OK, 1 row(s) in set (0.000849s) ## Tags -Starting from version 2.0.14, tag columns can be selected together with data columns when querying sub tables. Please be noted that, however, wildcard \* doesn't represent any tag column, that means tag columns must be specified explicitly like below example. +Starting from version 2.0.14, tag columns can be selected together with data columns when querying subtables. Please note, however, that wildcard \* cannot be used to represent any tag column. This means that tag columns must be specified explicitly like the example below. ``` taos> SELECT location, groupid, current FROM d1001 LIMIT 2; location | groupid | current | ====================================================================== - Beijing.Chaoyang | 2 | 10.30000 | - Beijing.Chaoyang | 2 | 12.60000 | + California.SanFrancisco | 2 | 10.30000 | + California.SanFrancisco | 2 | 12.60000 | Query OK, 2 row(s) in set (0.003112s) ``` ## Get distinct values -`DISTINCT` keyword can be used to get all the unique values of tag columns from a super table, it can also be used to get all the unique values of data columns from a table or sub table. +`DISTINCT` keyword can be used to get all the unique values of tag columns from a super table. It can also be used to get all the unique values of data columns from a table or subtable. ```sql SELECT DISTINCT tag_name [, tag_name ...] FROM stb_name; SELECT DISTINCT col_name [, col_name ...] FROM tb_name; ``` :::info -1. Configuration parameter `maxNumOfDistinctRes` in `taos.cfg` is used to control the number of rows to output. The minimum configurable value is 100,000, the maximum configurable value is 100,000,000, the default value is 1000,000. If the actual number of rows exceeds the value of this parameter, only the number of rows specified by this parameter will be output. -2. It can't be guaranteed that the results selected by using `DISTINCT` on columns of `FLOAT` or `DOUBLE` are exactly unique because of the precision nature of floating numbers. -3. `DISTINCT` can't be used in the sub-query of a nested query statement, and can't be used together with aggregate functions, `GROUP BY` or `JOIN` in same SQL statement. +1. Configuration parameter `maxNumOfDistinctRes` in `taos.cfg` is used to control the number of rows to output. The minimum configurable value is 100,000, the maximum configurable value is 100,000,000, the default value is 1,000,000. If the actual number of rows exceeds the value of this parameter, only the number of rows specified by this parameter will be output. +2. It can't be guaranteed that the results selected by using `DISTINCT` on columns of `FLOAT` or `DOUBLE` are exactly unique because of the precision errors in floating point numbers. +3. `DISTINCT` can't be used in the sub-query of a nested query statement, and can't be used together with aggregate functions, `GROUP BY` or `JOIN` in the same SQL statement. ::: ## Columns Names of Result Set -When using `SELECT`, the column names in the result set will be same as that in the select clause if `AS` is not used. `AS` can be used to rename the column names in the result set. For example +When using `SELECT`, the column names in the result set will be the same as that in the select clause if `AS` is not used. `AS` can be used to rename the column names in the result set.
For example ``` taos> SELECT ts, ts AS primary_key_ts FROM d1001; @@ -161,7 +161,7 @@ SELECT * FROM d1001; ## Special Query -Some special query functionalities can be performed without `FORM` sub-clause. For example, below statement can be used to get the current database in use. +Some special query functions can be invoked without `FROM` sub-clause. For example, the statement below can be used to get the current database in use. ``` taos> SELECT DATABASE(); @@ -181,7 +181,7 @@ taos> SELECT DATABASE(); Query OK, 1 row(s) in set (0.000184s) ``` -Below statement can be used to get the version of client or server. +The statement below can be used to get the version of client or server. ``` taos> SELECT CLIENT_VERSION(); @@ -197,7 +197,7 @@ taos> SELECT SERVER_VERSION(); Query OK, 1 row(s) in set (0.000077s) ``` -Below statement is used to check the server status. One integer, like `1`, is returned if the server status is OK, otherwise an error code is returned. This way is compatible with the status check for TDengine from connection pool or 3rd party tools, and can avoid the problem of losing connection from connection pool when using wrong heartbeat checking SQL statement. +The statement below is used to check the server status. An integer, like `1`, is returned if the server status is OK, otherwise an error code is returned. This is compatible with the status check for TDengine from connection pool or 3rd party tools, and can avoid the problem of losing the connection from a connection pool when using the wrong heartbeat checking SQL statement. ``` taos> SELECT SERVER_STATUS(); @@ -248,12 +248,12 @@ summary: ## Special Keywords in TAOS SQL -- `TBNAME`: it is treated as a special tag when selecting on a super table, representing the name of sub-tables in that super table. +- `TBNAME`: it is treated as a special tag when selecting on a super table, representing the name of subtables in that super table. - `_c0`: represents the first column of a table or super table. ## Tips -To get all the sub tables and corresponding tag values from a super table: +To get all the subtables and corresponding tag values from a super table: ```SQL SELECT TBNAME, location FROM meters; @@ -271,10 +271,10 @@ Only filter on `TAGS` are allowed in the `where` clause for above two query stat taos> SELECT TBNAME, location FROM meters; tbname | location | ================================================================== - d1004 | Beijing.Haidian | - d1003 | Beijing.Haidian | - d1002 | Beijing.Chaoyang | - d1001 | Beijing.Chaoyang | + d1004 | California.LosAngeles | + d1003 | California.LosAngeles | + d1002 | California.SanFrancisco | + d1001 | California.SanFrancisco | Query OK, 4 row(s) in set (0.000881s) taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2; @@ -284,11 +284,11 @@ taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2; Query OK, 1 row(s) in set (0.001091s) ``` -- Wildcard \* can be used to get all columns, or specific column names can be specified. Arithmetic operation can be performed on columns of number types, columns can be renamed in the result set. -- Arithmetic operation on columns can't be used in where clause. For example, `where a*2>6;` is not allowed but `where a>6/2;` can be used instead for same purpose. +- Wildcard \* can be used to get all columns, or specific column names can be specified. Arithmetic operation can be performed on columns of numerical types, columns can be renamed in the result set. +- Arithmetic operation on columns can't be used in where clause. 
For example, `where a*2>6;` is not allowed but `where a>6/2;` can be used instead for the same purpose. - Arithmetic operations on columns can't be used as the target of a select statement. For example, `select min(2*a) from t;` is not allowed but `select 2*min(a) from t;` can be used instead. - Logical operations can be used in the `WHERE` clause to filter numeric values; wildcards can be used to filter string values. -- Result set are arranged in ascending order of the first column, i.e. timestamp, but it can be controlled to output as descending order of timestamp. If `order by` is used on other columns, the result may be not as expected. By the way, \_c0 is used to represent the first column, i.e. timestamp. +- Result sets are arranged in ascending order of the first column, i.e. timestamp, but it can be controlled to output as descending order of timestamp. If `order by` is used on other columns, the result may not be as expected. Note that \_c0 is used to represent the first column, i.e. timestamp. - `LIMIT` parameter is used to control the number of rows to output. `OFFSET` parameter is used to specify from which row to output. `LIMIT` and `OFFSET` are executed after `ORDER BY` in the query execution. A simple tip is that `LIMIT 5 OFFSET 2` can be abbreviated as `LIMIT 2, 5`. - What is controlled by `LIMIT` is the number of rows in each group when `GROUP BY` is used. - `SLIMIT` parameter is used to control the number of groups when `GROUP BY` is used. Similar to `LIMIT`, `SLIMIT 5 OFFSET 2` can be abbreviated as `SLIMIT 2, 5`. @@ -296,7 +296,7 @@ Query OK, 1 row(s) in set (0.001091s) ## Where -Logical operations in below table can be used in `where` clause to filter the resulting rows. +Logical operations in the table below can be used in the `where` clause to filter the resulting rows. | **Operation** | **Note** | **Applicable Data Types** | | ------------- | ------------------------ | ----------------------------------------- | @@ -314,17 +314,17 @@ Logical operations in below table can be used in `where` clause to filter the re **Explanations**: -- Operator `<\>` is equal to `!=`, please be noted that this operator can't be used on the first column of any table, i.e.timestamp column. +- Operator `<\>` is equal to `!=`, please note that this operator can't be used on the first column of any table, i.e. the timestamp column. - Operator `like` is used together with wildcards to match strings - '%' matches 0 or any number of characters, '\_' matches any single ASCII character. - `\_` is used to match the \_ in the string. - - The maximum length of wildcard string is 100 bytes from version 2.1.6.1 (before that the maximum length is 20 bytes). `maxWildCardsLength` in `taos.cfg` can be used to control this threshold. Too long wildcard string may slowdown the execution performance of `LIKE` operator. + - The maximum length of wildcard string is 100 bytes from version 2.1.6.1 (before that the maximum length is 20 bytes). `maxWildCardsLength` in `taos.cfg` can be used to control this threshold. A very long wildcard string may slow down the execution performance of the `LIKE` operator. - `AND` keyword can be used to filter multiple columns simultaneously. AND/OR operation can be performed on single or multiple columns from version 2.3.0.0. However, before 2.3.0.0 `OR` can't be used on multiple columns. - For the timestamp column, only one condition can be used; for other columns or tags, `OR` keyword can be used to combine multiple logical operators. For example, `((value > 20 AND value < 30) OR (value < 12))`.
- From version 2.3.0.0, multiple conditions can be used on the timestamp column, but the result set can only contain a single time range. - From version 2.0.17.0, operator `BETWEEN AND` can be used in the where clause; for example `WHERE col2 BETWEEN 1.5 AND 3.25` means the filter condition is equal to "1.5 ≤ col2 ≤ 3.25". -- From version 2.1.4.0, operator `IN` can be used in where clause. For example, `WHERE city IN ('Beijing', 'Shanghai')`. For bool type, both `{true, false}` and `{0, 1}` are allowed, but integers other than 0 or 1 are not allowed. FLOAT and DOUBLE types are impacted by floating precision, only values that match the condition within the tolerance will be selected. Non-primary key column of timestamp type can be used with `IN`. -From version 2.3.0.0, regular expression is supported in where clause with keyword `match` or `nmatch`, the regular expression is case insensitive. +- From version 2.1.4.0, operator `IN` can be used in the where clause. For example, `WHERE city IN ('California.SanFrancisco', 'California.SanDiego')`. For bool type, both `{true, false}` and `{0, 1}` are allowed, but integers other than 0 or 1 are not allowed. FLOAT and DOUBLE types are impacted by floating point precision errors. Only values that match the condition within the tolerance will be selected. A non-primary key column of timestamp type can be used with `IN`. +- From version 2.3.0.0, regular expressions are supported in the where clause with keyword `match` or `nmatch`. The regular expression is case insensitive. ## Regular Expression @@ -342,11 +342,11 @@ The regular expression being used must be compliant with POSIX specification, pl Regular expressions can be used only against table names, i.e. `tbname`, and tags of binary/nchar types, but can't be used against data columns. -The maximum length of regular expression string is 128 bytes. Configuration parameter `maxRegexStringLen` can be used to set the maximum allowed regular expression. It's a configuration parameter on client side, and will take in effect after restarting the client. +The maximum length of the regular expression string is 128 bytes. Configuration parameter `maxRegexStringLen` can be used to set the maximum allowed regular expression. It's a configuration parameter on the client side, and will take effect after restarting the client. ## JOIN -From version 2.2.0.0, inner join is fully supported in TDengine. More specifically, the inner join between table and table, that between STable and STable, and that between sub query and sub query are supported. +From version 2.2.0.0, inner join is fully supported in TDengine. More specifically, the inner join between table and table, between STable and STable, and between sub query and sub query are supported. Only the primary key, i.e. timestamp, can be used in the join operation between table and table. For example: @@ -364,12 +364,12 @@ FROM temp_STable t1, temp_STable t2 WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0; ``` -Similary, join operation can be performed on the result set of multiple sub queries. +Similarly, join operations can be performed on the result set of multiple sub queries. :::note Restrictions on join operation: -- The number of tables or STables in single join operation can't exceed 10. +- The number of tables or STables in a single join operation can't exceed 10. - `FILL` is not allowed in a query statement that includes a JOIN operation. - Arithmetic operations are not allowed on the result set of a join operation.
- `GROUP BY` is not allowed on a part of tables that participate in join operation. @@ -380,9 +380,9 @@ Restrictions on join operation: ## Nested Query -Nested query is also called sub query, that means in a single SQL statement the result of inner query can be used as the data source of the outer query. +Nested query is also called sub query. This means that in a single SQL statement the result of inner query can be used as the data source of the outer query. -From 2.2.0.0, unassociated sub query can be used in the `FROM` clause. unassociated means the sub query doesn't use the parameters in the parent query. More specifically, in the `tb_name_list` of `SELECT` statement, an independent SELECT statement can be used. So a complete nested query looks like: +From 2.2.0.0, unassociated sub query can be used in the `FROM` clause. Unassociated means the sub query doesn't use the parameters in the parent query. More specifically, in the `tb_name_list` of `SELECT` statement, an independent SELECT statement can be used. So a complete nested query looks like: ```SQL SELECT ... FROM (SELECT ... FROM ...) ...; @@ -390,14 +390,14 @@ SELECT ... FROM (SELECT ... FROM ...) ...; :::info -- Only one layer of nesting is allowed, that means no sub query is allowed in a sub query -- The result set returned by the inner query will be used as a "virtual table" by the outer query, the "virtual table" can be renamed using `AS` keyword for easy reference in the outer query. +- Only one layer of nesting is allowed, that means no sub query is allowed within a sub query +- The result set returned by the inner query will be used as a "virtual table" by the outer query. The "virtual table" can be renamed using `AS` keyword for easy reference in the outer query. - Sub query is not allowed in continuous query. - JOIN operation is allowed between tables/STables inside both inner and outer queries. Join operation can be performed on the result set of the inner query. - UNION operation is not allowed in either inner query or outer query. -- The functionalities that can be used in the inner query is same as non-nested query. - - `ORDER BY` inside the inner query doesn't make any sense but will slow down the query performance significantly, so please avoid such usage. -- Compared to the non-nested query, the functionalities that can be used in the outer query have such restrictions as: +- The functions that can be used in the inner query are the same as those that can be used in a non-nested query. + - `ORDER BY` inside the inner query is unnecessary and will slow down the query performance significantly. It is best to avoid the use of `ORDER BY` inside the inner query. +- Compared to the non-nested query, the functionality that can be used in the outer query has the following restrictions: - Functions - If the result set returned by the inner query doesn't contain timestamp column, then functions relying on timestamp can't be used in the outer query, like `TOP`, `BOTTOM`, `FIRST`, `LAST`, `DIFF`. - Functions that need to scan the data twice can't be used in the outer query, like `STDDEV`, `PERCENTILE`. @@ -414,7 +414,7 @@ UNION ALL SELECT ... [UNION ALL SELECT ...] ``` -`UNION ALL` operator can be used to combine the result set from multiple select statements as long as the result set of these select statements have exactly same columns. `UNION ALL` doesn't remove redundant rows from multiple result sets. In single SQL statement, at most 100 `UNION ALL` can be supported. 
+`UNION ALL` operator can be used to combine the result sets from multiple select statements, as long as the result sets of these select statements have exactly the same columns. `UNION ALL` doesn't remove redundant rows from multiple result sets. In a single SQL statement, at most 100 `UNION ALL` can be supported. ### Examples @@ -442,8 +442,8 @@ The sum of col1 and col2 for rows later than 2018-06-01 08:00:00.000 and whose c SELECT (col1 + col2) AS 'complex' FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND col2 > 1.2 LIMIT 10 OFFSET 5; ``` -The rows in the past 10 minutes and whose col2 is bigger than 3.14 are selected and output to the result file `/home/testoutpu.csv` with below SQL statement: +The rows from the past 10 minutes whose col2 is bigger than 3.14 are selected and output to the result file `/home/testoutput.csv` with the SQL statement below: ```SQL -SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutpu.csv; +SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutput.csv; ``` diff --git a/docs-en/12-taos-sql/07-function.md b/docs-en/12-taos-sql/07-function.md index 9db5f36f92735c659a3bfae84c67089c62d577a6..825aeea354fb684e47ceed7afb2bc66d97b23c09 100644 --- a/docs-en/12-taos-sql/07-function.md +++ b/docs-en/12-taos-sql/07-function.md @@ -4,7 +4,7 @@ title: Functions ## Aggregate Functions -Aggregate query is supported in TDengine by following aggregate functions and selection functions. +Aggregate queries are supported in TDengine by the following aggregate functions and selection functions. ### COUNT ``` SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause]; ``` -**Description**:Get the number of rows or the number of non-null values in a table or a super table. +**Description**: Get the number of rows or the number of non-null values in a table or a super table. -**Return value type**:Long integer INT64 +**Return value type**: Long integer INT64 -**Applicable column types**:All +**Applicable column types**: All **Applicable table types**: table, super table, sub table **More explanation**: -- Wildcard (\*) can be used to represent all columns, it's used to get the number of all rows -- The number of non-NULL values will be returned if this function is used on a specific column +- Wildcard (\*) is used to represent all columns. The `COUNT` function is used to get the total number of all rows. +- The number of non-NULL values will be returned if this function is used on a specific column.
**Examples**: @@ -47,13 +47,13 @@ Query OK, 1 row(s) in set (0.001075s) SELECT AVG(field_name) FROM tb_name [WHERE clause]; ``` -**Description**:Get the average value of a column in a table or STable +**Description**: Get the average value of a column in a table or STable -**Return value type**:Double precision floating number +**Return value type**: Double precision floating number -**Applicable column types**:Data types except for timestamp, binary, nchar and bool +**Applicable column types**: Data types except for timestamp, binary, nchar and bool -**Applicable table types**:table, STable +**Applicable table types**: table, STable **Examples**: @@ -77,17 +77,17 @@ Query OK, 1 row(s) in set (0.000943s) SELECT TWA(field_name) FROM tb_name WHERE clause; ``` -**Description**:Time weighted average on a specific column within a time range +**Description**: Time weighted average on a specific column within a time range -**Return value type**:Double precision floating number +**Return value type**: Double precision floating number -**Applicable column types**:Data types except for timestamp, binary, nchar and bool +**Applicable column types**: Data types except for timestamp, binary, nchar and bool -**Applicable table types**:table, STable +**Applicable table types**: table, STable **More explanations**: -- From version 2.1.3.0, function TWA can be used on stable with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable. +- Since version 2.1.3.0, function TWA can be used on a STable with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable. ### IRATE ``` SELECT IRATE(field_name) FROM tb_name WHERE clause; ``` -**Description**:instantaneous rate on a specific column. The last two samples in the specified time range are used to calculate instantaneous rate. If the last sample value is smaller, then only the last sample value is used instead of the difference between the last two sample values. +**Description**: Instantaneous rate on a specific column. The last two samples in the specified time range are used to calculate the instantaneous rate. If the last sample value is smaller, then only the last sample value is used instead of the difference between the last two sample values. -**Return value type**:Double precision floating number +**Return value type**: Double precision floating number -**Applicable column types**:Data types except for timestamp, binary, nchar and bool +**Applicable column types**: Data types except for timestamp, binary, nchar and bool -**Applicable table types**:table, STable +**Applicable table types**: table, STable **More explanations**: -- From version 2.1.3.0, function IRATE can be used on stble with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable. +- Since version 2.1.3.0, function IRATE can be used on a STable with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.
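For instance, a minimal sketch of applying IRATE across the subtables of a STable, using the `meters` data model assumed throughout this chapter (the time filter is illustrative, not required):

```sql
-- Hypothetical query: instantaneous rate of `current` per subtable.
-- GROUP BY tbname forces one timeline per subtable, as described above.
SELECT IRATE(current) FROM meters WHERE ts >= NOW - 1h GROUP BY tbname;
```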
### SUM ``` SELECT SUM(field_name) FROM tb_name [WHERE clause]; ``` -**Description**:The sum of a specific column in a table or STable +**Description**: The sum of a specific column in a table or STable -**Return value type**:Double precision floating number or long integer +**Return value type**: Double precision floating number or long integer -**Applicable column types**:Data types except for timestamp, binary, nchar and bool +**Applicable column types**: Data types except for timestamp, binary, nchar and bool -**Applicable table types**:table, STable +**Applicable table types**: table, STable **Examples**: @@ -143,13 +143,13 @@ Query OK, 1 row(s) in set (0.000980s) SELECT STDDEV(field_name) FROM tb_name [WHERE clause]; ``` -**Description**:Standard deviation of a specific column in a table or STable +**Description**: Standard deviation of a specific column in a table or STable -**Return value type**:Double precision floating number +**Return value type**: Double precision floating number -**Applicable column types**:Data types except for timestamp, binary, nchar and bool +**Applicable column types**: Data types except for timestamp, binary, nchar and bool -**Applicable table types**:table, STable (starting from version 2.0.15.1) +**Applicable table types**: table, STable (since version 2.0.15.1) **Examples**: @@ -193,13 +193,13 @@ SELECT MODE(field_name) FROM tb_name [WHERE clause]; **Description**: The value which has the highest frequency of occurrence. NULL is returned if there are multiple values which have the highest frequency of occurrence. It can't be used on the timestamp column or tags. -**Return value type**:Same as the data type of the column being operated +**Return value type**: Same as the data type of the column being operated upon **Applicable column types**: Data types except for timestamp **More explanations**: Since the number of returned results is unpredictable, it's suggested to limit the number of unique values to 100,000; otherwise an error will be returned. -**Applicable version**:From version 2.6.0.0 +**Applicable version**: Since version 2.6.0.0 **Examples**: @@ -234,7 +234,7 @@ SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause]; **More explanations**: The benefit of using the hyperloglog algorithm is that the memory usage is under control when the data volume is huge. However, when the data volume is very small, the result may not be accurate; it's recommended to use `select count(data) from (select unique(col) as data from table)` in this case. -**Applicable versions**:From version 2.6.0.0 +**Applicable versions**: Since version 2.6.0.0 **Examples**: @@ -261,7 +261,7 @@ Query OK, 1 row(s) in set (0.008388s) ## Selection Functions -When any selective function is used, timestamp column or tag columns including `tbname` can be specified to show that the selected value are from which rows. +When any selection function is used, the timestamp column or tag columns, including `tbname`, can be specified to identify which rows the selected values come from.
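As an illustrative sketch of this (again assuming the `meters` data model used in this chapter), the timestamp and subtable name can be selected alongside a selection function:

```sql
-- Hypothetical query: ts and tbname identify the row from which the
-- maximum value of `current` was selected.
SELECT ts, tbname, MAX(current) FROM meters;
```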
### MIN @@ -269,13 +269,13 @@ When any selective function is used, timestamp column or tag columns including ` SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause]; ``` -**Description**:The minimum value of a specific column in a table or STable +**Description**: The minimum value of a specific column in a table or STable -**Return value type**:Same as the data type of the column being operated +**Return value type**: Same as the data type of the column being operated upon -**Applicable column types**:Data types except for timestamp, binary, nchar and bool +**Applicable column types**: Data types except for timestamp, binary, nchar and bool -**Applicable table types**:table, STable +**Applicable table types**: table, STable **Examples**: @@ -299,13 +299,13 @@ Query OK, 1 row(s) in set (0.000950s) SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**:The maximum value of a specific column of a table or STable +**Description**: The maximum value of a specific column of a table or STable -**Return value type**:Same as the data type of the column being operated +**Return value type**: Same as the data type of the column being operated upon -**Applicable column types**:Data types except for timestamp, binary, nchar and bool +**Applicable column types**: Data types except for timestamp, binary, nchar and bool -**Applicable table types**:table, STable +**Applicable table types**: table, STable **Examples**: @@ -329,19 +329,19 @@ Query OK, 1 row(s) in set (0.000987s) SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**:The first non-null value of a specific column in a table or STable +**Description**: The first non-null value of a specific column in a table or STable -**Return value type**:Same as the column being operated +**Return value type**: Same as the column being operated upon -**Applicable column types**:Any data type +**Applicable column types**: Any data type -**Applicable table types**:table, STable +**Applicable table types**: table, STable **More explanations**: - FIRST(\*) can be used to get the first non-null value of all columns - NULL will be returned if all the values of the specified column are all NULL -- No result will NOT be returned if all the columns in the result set are all NULL +- A result will NOT be returned if all the columns in the result set are all NULL **Examples**: @@ -365,13 +365,13 @@ Query OK, 1 row(s) in set (0.001023s) SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**:The last non-NULL value of a specific column in a table or STable +**Description**: The last non-NULL value of a specific column in a table or STable -**Return value type**:Same as the column being operated +**Return value type**: Same as the column being operated upon -**Applicable column types**:Any data type +**Applicable column types**: Any data type -**Applicable table types**:table, STable +**Applicable table types**: table, STable **More explanations**: @@ -403,11 +403,11 @@ SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; **Description**: The greatest _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly. 
-**Return value type**:Same as the column being operated +**Return value type**: Same as the column being operated upon -**Applicable column types**:Data types except for timestamp, binary, nchar and bool +**Applicable column types**: Data types except for timestamp, binary, nchar and bool -**Applicable table types**:table, STable +**Applicable table types**: table, STable **More explanations**: @@ -440,9 +440,9 @@ Query OK, 2 row(s) in set (0.000810s) SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**:The least _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly. +**Description**: The least _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly. -**Return value type**:Same as the column being operated +**Return value type**: Same as the column being operated upon **Applicable column types**: Data types except for timestamp, binary, nchar and bool @@ -549,7 +549,7 @@ SELECT LAST_ROW(field_name) FROM { tb_name | stb_name }; **Description**: The last row of a table or STable -**Return value type**: Same as the column being operated +**Return value type**: Same as the column being operated upon **Applicable column types**: Any data type @@ -576,7 +576,7 @@ SELECT LAST_ROW(field_name) FROM { tb_name | stb_name }; Query OK, 1 row(s) in set (0.001042s) ``` -### INTERP [From version 2.3.1] +### INTERP [Since version 2.3.1] ``` SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})]; @@ -584,7 +584,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ **Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned. -**Return value type**: same as the column being operated +**Return value type**: Same as the column being operated upon **Applicable column types**: Numeric data types @@ -593,7 +593,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ **More explanations** - `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter. -- The input data of `INTERP` is the value of the specified column, `where` can be used to filter the original data. If no `where` condition is specified then all original data is the input. +- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input. - The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified. If `RANGE` is not specified, then the timestamp of the first row that matches the filter condition is treated as timestamp1, the timestamp of the last row that matches the filter condition is treated as timestamp2. 
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `EVERY` parameter. If the `EVERY` parameter is not used, the time window is considered to have no ending timestamp, i.e. there is only one time window from timestamp1. - Interpolation is performed based on the `FILL` parameter. No interpolation is performed if `FILL` is not used; that means either the original data that matches is returned or nothing is returned. @@ -608,7 +608,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ taos> SELECT INTERP(current) FROM t1 RANGE('2017-7-14 18:40:00','2017-7-14 18:40:00') FILL(LINEAR); ``` -- Get an original data every 5 seconds, no interpolation, between "2017-07-14 18:00:00" and "2017-07-14 19:00:00: +- Get original data every 5 seconds, no interpolation, between "2017-07-14 18:00:00" and "2017-07-14 19:00:00": ``` taos> SELECT INTERP(current) FROM t1 RANGE('2017-7-14 18:00:00','2017-7-14 19:00:00') EVERY(5s); @@ -632,7 +632,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ taos> SELECT INTERP(current) FROM t1 where ts >= '2017-07-14 17:00:00' and ts <= '2017-07-14 20:00:00' RANGE('2017-7-14 18:00:00','2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR); ``` -### INTERP [Prior to version 2.3.1] +### INTERP [Since version 2.0.15.0] ``` SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})]; @@ -640,7 +640,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL **Description**: The value of a specific column that matches the specified time slice -**Return value type**: Same as the column being operated +**Return value type**: Same as the column being operated upon **Applicable column types**: Numeric data type @@ -648,7 +648,6 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL **More explanations**: -- It can be used from version 2.0.15.0 - Time slice must be specified. If there is no data matching the specified time slice, interpolation is performed based on the `FILL` parameter. Conditions such as tags or `tbname` can be used in the `WHERE` clause to filter data. - The timestamp specified must be within the time range of the data rows of the table or STable. If it is beyond the valid time range, nothing is returned even with the `FILL` parameter. - `INTERP` can be used to query only a single time point at a time. `INTERP` can be used with `EVERY` to get the interpolation value every time interval. @@ -662,7 +661,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL Query OK, 1 row(s) in set (0.002652s) ``` -If there is not any data corresponding to the specified timestamp, an interpolation value is returned if interpolation policy is specified by `FILL` parameter; or nothing is returned\ +If there is no data corresponding to the specified timestamp, an interpolation value is returned if an interpolation policy is specified by the `FILL` parameter; otherwise nothing is returned.
``` taos> SELECT INTERP(*) FROM meters WHERE tbname IN ('d636') AND ts='2017-7-14 18:40:00.005'; @@ -696,11 +695,11 @@ SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause]; **Parameter value range**: k: [1,100] offset_val: [0,100] -**Return value type**: Same as the column being operated +**Return value type**: Same as the column being operated upon **Applicable column types**: Any data type except for timestamp, i.e. the primary key -**Applicable versions**: From version 2.6.0.0 +**Applicable versions**: Since version 2.6.0.0 **Examples**: @@ -732,11 +731,11 @@ SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause]; **Description**: The values that occur for the first time in the specified column. The effect is similar to the `distinct` keyword, but it can also be used to match tags or timestamp. -**Return value type**: Same as the column or tag being operated +**Return value type**: Same as the column or tag being operated upon **Applicable column types**: Any data types except for timestamp -**Applicable versions**: From version 2.6.0.0 +**Applicable versions**: Since version 2.6.0.0 **More explanations**: @@ -780,7 +779,7 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER **Description**: The difference between each row and its previous row for a specific column. `ignore_negative` can be specified as 0 or 1; the default value is 1 if it's not specified. `1` means negative values are ignored. -**Return value type**: Same as the column being operated +**Return value type**: Same as the column being operated upon **Applicable column types**: Data types except for timestamp, binary, nchar and bool @@ -789,8 +788,8 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER **More explanations**: - The number of result rows is the total number of rows minus one; there is no output for the first row -- From version 2.1.30, `DIFF` can be used on STable with `GROUP by tbname` -- From version 2.6.0, `ignore_negative` parameter is supported +- Since version 2.1.30, `DIFF` can be used on STable with `GROUP by tbname` +- Since version 2.6.0, `ignore_negative` parameter is supported **Examples**: @@ -819,7 +818,7 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER **More explanations**: -- It is available from version 2.1.3.0, the number of result rows is the number of total rows in the time range subtracted by one, no output for the first row.\ +- It is available since version 2.1.3.0; the number of result rows is the total number of rows in the time range minus one, with no output for the first row. - It can be used together with `GROUP BY tbname` against a STable.
**Examples**: @@ -874,7 +873,7 @@ Query OK, 1 row(s) in set (0.000836s) SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The round up value of a specific column +**Description**: The rounded up value of a specific column **Return value type**: Same as the column being used @@ -882,7 +881,7 @@ SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause]; **Applicable table types**: table, STable -**Applicable nested query**: inner query and outer query +**Applicable nested query**: Inner query and outer query **More explanations**: @@ -896,9 +895,9 @@ SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause]; SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The round down value of a specific column +**Description**: The rounded down value of a specific column -**More explanations**: The restrictions are same as `CEIL` function. +**More explanations**: The restrictions are the same as those of the `CEIL` function. ### ROUND ``` SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The round value of a specific column. +**Description**: The rounded value of a specific column. **More explanations**: The restrictions are the same as those of the `CEIL` function. @@ -933,7 +932,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause]; - Can only be used with aggregate functions - `Group by tbname` must be used together on a STable to force the result on a single timeline -**Applicable versions**: From 2.3.0.x +**Applicable versions**: Since 2.3.0.x ### MAVG @@ -958,7 +957,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause]; - Can't be used with aggregate functions. - Must be used with `GROUP BY tbname` when it's used on a STable to force the result on each single timeline. -**Applicable versions**: From 2.3.0.x +**Applicable versions**: Since 2.3.0.x ### SAMPLE @@ -981,7 +980,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause]; - Arithmetic operations can't be performed on the result of the `SAMPLE` function - Must be used with `Group by tbname` when it's used on a STable to force the result on each single timeline -**Applicable versions**: From 2.3.0.x +**Applicable versions**: Since 2.3.0.x ### ASIN @@ -1460,8 +1459,8 @@ SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WH **More explanations**: -- Arithmetic operations can be performed on two or more columns, `()` can be used to control the precedence -- NULL doesn't participate the operation, if one of the operands is NULL then result is NULL +- Arithmetic operations can be performed on two or more columns. Parentheses `()` can be used to control the order of precedence. +- NULL doesn't participate in the operation, i.e. if one of the operands is NULL then the result is NULL. **Examples**: @@ -1586,7 +1585,7 @@ Query OK, 6 row(s) in set (0.002613s) ## Time Functions -From version 2.6.0.0, below time related functions can be used in TDengine. +Since version 2.6.0.0, the time related functions below can be used in TDengine. ### NOW @@ -1840,6 +1839,8 @@ SELECT TIMEDIFF(ts_val1 | datetime_string1 | ts_col1, ts_val2 | datetime_string2
- The precision of the returned timestamp is same as the precision set for the current data base in use +**Applicable versions**:Since version 2.6.0.0 + **Examples**: ```sql diff --git a/docs-en/12-taos-sql/08-interval.md b/docs-en/12-taos-sql/08-interval.md index 5cc3fa8cb43749fd40b808699f82a8761525cc6a..acfb0de0e1521fd8c6a068497a3df7a17941524c 100644 --- a/docs-en/12-taos-sql/08-interval.md +++ b/docs-en/12-taos-sql/08-interval.md @@ -3,36 +3,36 @@ sidebar_label: Interval title: Aggregate by Time Window --- -Aggregate by time window is supported in TDengine. For example, each temperature sensor reports the temperature every second, the average temperature every 10 minutes can be retrieved by query with time window. -Window related clauses are used to divide the data set to be queried into subsets and then aggregate. There are three kinds of windows, time window, status window, and session window. There are two kinds of time windows, sliding window and flip time window. +Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every seconds, the average temperature for every 10 minutes can be retrieved by performing a query with a time window. +Window related clauses are used to divide the data set to be queried into subsets and then aggregation is performed across the subsets. There are three kinds of windows: time window, status window, and session window. There are two kinds of time windows: sliding window and flip time/tumbling window. ## Time Window -`INTERVAL` clause is used to generate time windows of same time interval, `SLIDING` is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining continuous query both the size of time window and the step of forward sliding time need to be specified. As shown in the figure blow, [t0s, t0e] ,[t1s , t1e], [t2s, t2e] are respectively the time range of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time window. +The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure blow, [t0s, t0e] ,[t1s , t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window. -![Time Window](/img/sql/timewindow-1.png) +![TDengine Database Time Window](./timewindow-1.webp) -`INTERVAL` and `SLIDING` should be used with aggregate functions and selection functions. 
Below SQL statement is illegal because no aggregate or selection function is used with `INTERVAL`. +`INTERVAL` and `SLIDING` should be used with aggregate functions and select functions. The SQL statement below is illegal because no aggregate or selection function is used with `INTERVAL`. ``` SELECT * FROM temp_tb_1 INTERVAL(1m); ``` -The time step specified by `SLIDING` can't exceed the time interval specified by `INTERVAL`. Below SQL statement is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`. +The time step specified by `SLIDING` cannot exceed the time interval specified by `INTERVAL`. The SQL statement below is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`. ``` SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m); ``` -When the time length specified by `SLIDING` is same as that specified by `INTERVAL`, sliding window is actually flip window. The minimum time range specified by `INTERVAL` is 10 milliseconds (10a) prior to version 2.1.5.0. From version 2.1.5.0, the minimum time range by `INTERVAL` can be 1 microsecond (1u). However, if the DB precision is millisecond, the minimum time range is 1 millisecond (1a). Please be noted that the `timezone` parameter should be configured to same value in the `taos.cfg` configuration file on client side and server side. +When the time length specified by `SLIDING` is the same as that specified by `INTERVAL`, the sliding window is actually a flip/tumbling window. The minimum time range specified by `INTERVAL` is 10 milliseconds (10a) prior to version 2.1.5.0. Since version 2.1.5.0, the minimum time range by `INTERVAL` can be 1 microsecond (1u). However, if the DB precision is millisecond, the minimum time range is 1 millisecond (1a). Please note that the `timezone` parameter should be configured to be the same value in the `taos.cfg` configuration file on client side and server side. ## Status Window -In case of using integer, bool, or string to represent the device status at a moment, the continuous rows with same status belong to same status window. Once the status changes, the status window closes. As shown in the following figure,there are two status windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12]. Status window is not applicable to STable for now. +In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12]. Status window is not applicable to STable for now. -![Status Window](/img/sql/timewindow-3.png) +![TDengine Database Status Window](./timewindow-3.webp) -`STATE_WINDOW` is used to specify the column based on which to define status window, for example: +`STATE_WINDOW` is used to specify the column on which the status window will be based. For example: ``` SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status); @@ -44,9 +44,9 @@ SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status); SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val); ``` -The primary key, i.e. timestamp, is used to determine which session window the row belongs to. 
If the time interval between two adjacent rows is within the time range specified by `tol_val`, they belong to same session window; otherwise they belong to two different time windows. As shown in the figure below, if the limit of time interval for session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds. +The primary key, i.e. timestamp, is used to determine which session window a row belongs to. If the time interval between two adjacent rows is within the time range specified by `tol_val`, they belong to the same session window; otherwise they belong to two different session windows. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitute 2 session windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds. -![Session Window](/img/sql/timewindow-2.png) +![TDengine Database Session Window](./timewindow-2.webp) If the time interval between two continuous rows is within the time interval specified by `tol_value`, they belong to the same session window; otherwise a new session window is started automatically. Session window is not supported on STable for now. @@ -54,7 +54,7 @@ If the time interval between two continuous rows are within the time interval sp ### Syntax -The full syntax of aggregate by window is as following: +The full syntax of aggregate by window is as follows: ```sql SELECT function_list FROM tb_name @@ -73,11 +73,11 @@ SELECT function_list FROM stb_name ### Restrictions -- Aggregate functions and selection functions can be used in `function_list`, with each function having only one output, for example COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST. Functions having multiple output can't be used, for example DIFF or arithmetic operations. +- Aggregate functions and selection functions can be used in `function_list`, with each function having only one output. For example COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST. Functions having multiple outputs, such as DIFF or arithmetic operations, can't be used. - `LAST_ROW` can't be used together with window aggregate. - Scalar functions, like CEIL/FLOOR, can't be used with window aggregate. - `WHERE` clause can be used to specify the starting and ending time and other filter conditions -- `FILL` clause is used to specify how to fill when there is data missing in any window, including: \ +- `FILL` clause is used to specify how to fill when there is data missing in any window, including: 1. NONE: No fill (the default fill mode) 2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` 3. PREV: Fill with the previous non-NULL value, `FILL(PREV)` @@ -87,22 +87,23 @@ :::info -1. Huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum interpolation values that can be returned in single query is 10,000,000. -2.
The result set is in the ascending order of timestamp in aggregate by time window aggregate. +1. A huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum number of interpolation values that can be returned in a single query is 10,000,000. +2. The result set is in ascending order of timestamp when you aggregate by time window. 3. If aggregate by window is used on STable, the aggregate function is performed on all the rows matching the filter conditions. If `GROUP BY` is not used in the query, the result set will be returned in ascending order of timestamp; otherwise the result set is not exactly in the order of ascending timestamp in each group. - ::: + +::: Aggregate by time window is also used in continuous queries; please refer to [Continuous Query](/develop/continuous-query). ## Examples -The table of intelligent meters can be created like below SQL statement: +A table of intelligent meters can be created by the SQL statement below: ```sql CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT); ``` -The average current, maximum current and median of current in every 10 minutes of the past 24 hours can be calculated using below SQL statement, with missing value filled with the previous non-NULL value. +The average current, maximum current and median of current in every 10 minutes for the past 24 hours can be calculated using the SQL statement below, with missing values filled with the previous non-NULL values. ``` SELECT AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters diff --git a/docs-en/12-taos-sql/09-limit.md b/docs-en/12-taos-sql/09-limit.md index 873e484fbb4731294d00df323f8e0d2cbc6b1d30..db55cdd69e7bd29ca66ee15b61f28991568d9556 100644 --- a/docs-en/12-taos-sql/09-limit.md +++ b/docs-en/12-taos-sql/09-limit.md @@ -4,9 +4,9 @@ title: Limits & Restrictions ## Naming Rules -1. Only English characters, digits and underscore are allowed -2. Can't be started with digits -3. Case Insensitive without escape character "\`" +1. Only characters from the English alphabet, digits and underscore are allowed +2. Names cannot start with a digit +3. Case insensitive without escape character "\`" 4. Identifier with escape character "\`" To support more flexible table or column names, a new escape character "\`" is introduced. For more details please refer to [escape](/taos-sql/escape). @@ -16,38 +16,38 @@ The legal character set is `[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]`. ## General Limits -- Maximum length of database name is 32 bytes -- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator -- Maximum length of each data row is 48K bytes from version 2.1.7.0 , before which the limit is 16K bytes. Please be noted that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type. -- Maximum of column name is 64. +- Maximum length of database name is 32 bytes. +- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator. +- Maximum length of each data row is 48K bytes since version 2.1.7.0, before which the limit was 16K bytes. Please note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type. +- Maximum length of column name is 64. - Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp. - Maximum length of tag name is 64.
- Maximum number of tags is 128. There must be at least 1 tag. The total length of tag values should not exceed 16K bytes. -- Maximum length of singe SQL statement is 1048576, i.e. 1 MB bytes. It can be configured in the parameter `maxSQLLength` in the client side, the applicable range is [65480, 1048576]. -- At most 4096 columns (or 1024 prior to 2.1.7.0) can be returned by `SELECT`, functions in the query statement may constitute columns. Error will be returned if the limit is exceeded. -- Maximum numbers of databases, STables, tables are only depending on the system resources. -- Maximum of database name is 32 bytes, can't include "." and special characters. -- Maximum replica number of database is 3 -- Maximum length of user name is 23 bytes -- Maximum length of password is 15 bytes -- Maximum number of rows depends on the storage space only. -- Maximum number of tables depends on the number of nodes only. -- Maximum number of databases depends on the number of nodes only. -- Maximum number of vnodes for single database is 64. +- Maximum length of a single SQL statement is 1048576, i.e. 1 MB. It can be configured in the parameter `maxSQLLength` on the client side; the applicable range is [65480, 1048576]. +- At most 4096 columns (or 1024 prior to 2.1.7.0) can be returned by `SELECT`. Functions in the query statement constitute columns. An error is returned if the limit is exceeded. +- Maximum numbers of databases, STables and tables depend only on the system resources. +- Maximum length of database name is 32 bytes, and it can't include "." or special characters. +- Maximum number of replicas for a database is 3. +- Maximum length of user name is 23 bytes. +- Maximum length of password is 15 bytes. +- Maximum number of rows depends only on the storage space. +- Maximum number of tables depends only on the number of nodes. +- Maximum number of databases depends only on the number of nodes. +- Maximum number of vnodes for a single database is 64. ## Restrictions of `GROUP BY` -`GROUP BY` can be performed on tags and `TBNAME`. It can be performed on data columns too, with one restriction that only one column and the number of unique values on that column is lower than 100,000. Please be noted that `GROUP BY` can't be performed on float or double type. +`GROUP BY` can be performed on tags and `TBNAME`. It can be performed on data columns too, with the only restriction being that it can be performed on only one data column, whose number of unique values must be lower than 100,000. Please note that `GROUP BY` cannot be performed on float or double types. ## Restrictions of `IS NOT NULL` -`IS NOT NULL` can be used on any data type of columns. The non-empty string evaluation expression, i.e. `<\>""` can only be used on non-numeric data types. +`IS NOT NULL` can be used on columns of any data type. The non-empty string evaluation expression, i.e. `< > ""` can only be used on non-numeric data types. ## Restrictions of `ORDER BY` -- Only one `order by` is allowed for normal table and sub table. +- Only one `order by` is allowed for normal table and subtable. - At most two `order by` are allowed for STable, and the second one must be `ts`. -- `order by tag` must be used with `group by tag` on same tag, this rule is also applicable to `tbname`. +- `order by tag` must be used with `group by tag` on the same tag. This rule is also applicable to `tbname`. - `order by column` must be used with `group by column` or `top/bottom` on the same column. This rule is applicable to table and STable.
- `order by ts` is applicable to table and STable. - If `order by ts` is used with `group by`, the result set is sorted using `ts` in each group. @@ -56,11 +56,11 @@ The legal character set is `[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]`. ### Name Restrictions of Table/Column -The name of a table or column can only be composed of ASCII characters, digits and underscore, while digit can't be used as the beginning. The maximum length is 192 bytes. Names are case insensitive. The name mentioned in this rule doesn't include the database name prefix and the separator. +The name of a table or column can only be composed of ASCII characters, digits and underscore, and it cannot start with a digit. The maximum length is 192 bytes. Names are case insensitive. The name mentioned in this rule doesn't include the database name prefix and the separator. ### Name Restrictions After Escaping -To support more flexible table or column names, new escape character "`" is introduced in TDengine to avoid the conflict between table name and keywords and break the above restrictions for table name. The escape character is not counted in the length of table name. +To support more flexible table or column names, a new escape character "\`" is introduced in TDengine to avoid the conflict between table names and keywords and break the above restrictions for table names. The escape character is not counted in the length of a table name. With escaping, the string inside escape characters is case sensitive, i.e. will not be converted to lower case internally. diff --git a/docs-en/12-taos-sql/10-json.md b/docs-en/12-taos-sql/10-json.md index 60468f1e0fd75cc04cae8a91b0a1a22b9bd3600b..7460a5e0ba3ce78ee7744569cda460c477cac19c 100644 --- a/docs-en/12-taos-sql/10-json.md +++ b/docs-en/12-taos-sql/10-json.md @@ -4,7 +4,7 @@ title: JSON Type ## Syntax -1. Tag of JSON type +1. Tag of type JSON ```sql create STable s1 (ts timestamp, v1 int) tags (info json); @@ -12,7 +12,7 @@ title: JSON Type create table s1_1 using s1 tags ('{"k1": "v1"}'); ``` -2. -> Operator of JSON +2. "->" Operator of JSON ```sql select * from s1 where info->'k1' = 'v1'; @@ -20,7 +20,7 @@ title: JSON Type select info->'k1' from s1; ``` -3. contains Operator of JSON +3. "contains" Operator of JSON ```sql select * from s1 where info contains 'k2'; @@ -30,7 +30,7 @@ title: JSON Type ## Applicable Operations -1. When JSON data type is used in `where`, `match/nmatch/between and/like/and/or/is null/is no null` can be used but `in` can't be used. +1. When a JSON data type is used in `where`, `match/nmatch/between and/like/and/or/is null/is not null` can be used but `in` can't be used. ```sql select * from s1 where info->'k1' match 'v*'; @@ -42,9 +42,9 @@ title: JSON Type select * from s1 where info->'k1' is not null; ``` -2. Tag of JSON type can be used in `group by`, `order by`, `join`, `union all` and sub query, for example `group by json->'key'` +2. A tag of JSON type can be used in `group by`, `order by`, `join`, `union all` and sub query; for example `group by json->'key'` -3. `Distinct` can be used with tag of JSON type +3. `Distinct` can be used with a tag of type JSON ```sql select distinct info->'k1' from s1; @@ -52,29 +52,29 @@ title: JSON Type 4. Tag Operations - The value of JSON tag can be altered. Please be noted that the full JSON will be override when doing this. + The value of a JSON tag can be altered. Please note that the full JSON will be overridden when doing this. - The name of JSON tag can be altered. A tag of JSON type can't be added or removed.
The column length of a JSON tag can't be changed. + The name of a JSON tag can be altered. A tag of JSON type can't be added or removed. The column length of a JSON tag can't be changed. ## Other Restrictions -- JSON type can only be used for tag. There can be only one tag of JSON type, and it's exclusive to any other types of tag. +- JSON type can only be used for a tag. There can be only one tag of JSON type, and it's exclusive to any other types of tags. - The maximum length of keys in JSON is 256 bytes, and key must be printable ASCII characters. The maximum total length of a JSON is 4,096 bytes. - JSON format: - - The input string for JSON can be empty, i.e. "", "\t", or NULL, but can't be non-NULL string, bool or array. - - object can be {}, and the whole JSON is empty if so. Key can be "", and it's ignored if so. - - value can be int, double, string, boll or NULL, can't be array. Nesting is not allowed, that means value can't be another JSON. + - The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be non-NULL string, bool or array. + - object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so. + - value can be int, double, string, bool or NULL, and it can't be an array. Nesting is not allowed which means that the value of a key can't be JSON. - If one key occurs twice in JSON, only the first one is valid. - Escape characters are not allowed in JSON. -- NULL is returned if querying a key that doesn't exist in JSON. +- NULL is returned when querying a key that doesn't exist in JSON. - If a tag of JSON is the result of inner query, it can't be parsed and queried in the outer query. -For example, below SQL statements are not supported. +For example, the SQL statements below are not supported. ```sql; select jtag->'key' from (select jtag from STable); diff --git a/docs-en/12-taos-sql/index.md b/docs-en/12-taos-sql/index.md index 611f2bf75eb2a234ae139ce65f2e78d356483bb7..33656338a7bba38dc55cf536bdba8e95309c5acf 100644 --- a/docs-en/12-taos-sql/index.md +++ b/docs-en/12-taos-sql/index.md @@ -3,11 +3,9 @@ title: TDengine SQL description: "The syntax supported by TDengine SQL " --- -This section explains the syntax about operating database, table, STable, inserting data, selecting data, functions and some tips that can be used in TDengine SQL. It would be easier to understand with some fundamental knowledge of SQL. +This section explains the syntax of SQL to perform operations on databases, tables and STables, insert data, select data and use functions. We also provide some tips that can be used in TDengine SQL. If you have previous experience with SQL this section will be fairly easy to understand. If you do not have previous experience with SQL, you'll come to appreciate the simplicity and power of SQL. -TDengine SQL is the major interface for users to write data into or query from TDengine. For users to easily use, syntax similar to standard SQL is provided. However, please be noted that TDengine SQL is not standard SQL. Besides, because TDengine doesn't provide the functionality of deleting time series data, corresponding statements are not provided in TDengine SQL. - -TDengine SQL doesn't support abbreviation for keywords, for example `DESCRIBE` can't be abbreviated as `DESC`. +TDengine SQL is the major interface for users to write data into or query from TDengine. For ease of use, the syntax is similar to that of standard SQL. However, please note that TDengine SQL is not standard SQL. 
For instance, TDengine doesn't provide a delete function for time series data and so corresponding statements are not provided in TDengine SQL. Syntax Specifications used in this chapter: @@ -16,7 +14,7 @@ Syntax Specifications used in this chapter: - | means one of a few options, excluding | itself. - … means the item prior to it can be repeated multiple times. -To better demonstrate the syntax, usage and rules of TAOS SQL, hereinafter it's assumed that there is a data set of meters. Assuming each meter collects 3 data: current, voltage, phase. The data model is as below: +To better demonstrate the syntax, usage and rules of TAOS SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below: ```sql taos> DESCRIBE meters; @@ -30,4 +28,4 @@ taos> DESCRIBE meters; groupid | INT | 4 | TAG | ``` -The data set includes the data collected by 4 meters, the corresponding table name is d1001, d1002, d1003, d1004 respectively based on the data model of TDengine. +The data set includes the data collected by 4 meters; based on the data model of TDengine, the corresponding table names are d1001, d1002, d1003 and d1004.
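As a quick illustration of how this data model is used in the rest of the chapter (the aggregate function and grouping tag are only illustrative choices):

```sql
-- Average voltage per location group, aggregated across all 4 meters:
SELECT AVG(voltage) FROM meters GROUP BY groupid;
```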
diff --git a/docs-en/12-taos-sql/timewindow-1.webp b/docs-en/12-taos-sql/timewindow-1.webp new file mode 100644 index 0000000000000000000000000000000000000000..82747558e96df752a0010d85be79a4af07e4a1df Binary files /dev/null and b/docs-en/12-taos-sql/timewindow-1.webp differ diff --git a/docs-en/12-taos-sql/timewindow-2.webp b/docs-en/12-taos-sql/timewindow-2.webp new file mode 100644 index 0000000000000000000000000000000000000000..8f1314ae34f7f5c5cca1d3cb80455f555fad38c3 Binary files /dev/null and b/docs-en/12-taos-sql/timewindow-2.webp differ diff --git a/docs-en/12-taos-sql/timewindow-3.webp b/docs-en/12-taos-sql/timewindow-3.webp new file mode 100644 index 0000000000000000000000000000000000000000..5bd16e68e7fd5da6805551e9765975277cd5d4d9 Binary files /dev/null and b/docs-en/12-taos-sql/timewindow-3.webp differ diff --git a/docs-en/13-operation/01-pkg-install.md b/docs-en/13-operation/01-pkg-install.md index a1aad1c3c96c52689e9f68509c27ccce574d2082..c098002962d62aa0acc7a94462c052303cb2ed90 100644 --- a/docs-en/13-operation/01-pkg-install.md +++ b/docs-en/13-operation/01-pkg-install.md @@ -6,7 +6,7 @@ description: Install, Uninstall, Start, Stop and Upgrade import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; -TDengine community version provides dev and rpm package for users to choose based on the system environment. deb supports Debian, Ubuntu and systems derived from them. rpm supports CentOS, RHEL, SUSE and systems derived from them. Furthermore, tar.gz package is provided for enterprise customers. +TDengine community version provides deb and rpm packages for users to choose from, based on their system environment. The deb package supports Debian, Ubuntu and derivative systems. The rpm package supports CentOS, RHEL, SUSE and derivative systems. Furthermore, a tar.gz package is provided for TDengine Enterprise customers. ## Install @@ -14,7 +14,7 @@ TDengine community version provides dev and rpm package for users to choose base 1. Download deb package from official website, for example TDengine-server-2.4.0.7-Linux-x64.deb -2. In the directory where the package is located, execute below command +2. In the directory where the package is located, execute the command below ```bash $ sudo dpkg -i TDengine-server-2.4.0.7-Linux-x64.deb @@ -46,7 +46,7 @@ TDengine is installed successfully! 1. Download rpm package from official website, for example TDengine-server-2.4.0.7-Linux-x64.rpm; -2. In the directory where the package is located, execute below command +2. In the directory where the package is located, execute the command below ``` $ sudo rpm -ivh TDengine-server-2.4.0.7-Linux-x64.rpm @@ -77,7 +77,7 @@ TDengine is installed successfully! 1. Download the tar.gz package, for example TDengine-server-2.4.0.7-Linux-x64.tar.gz; - 2、In the directory where the package is located, firstly decompress the file, then switch to the sub-directory generated in decompressing, i.e. "TDengine-enterprise-server-2.4.0.7/" in this example, and execute the `install.sh` script. +2. In the directory where the package is located, first decompress the file, then switch to the sub-directory generated by decompressing, i.e. "TDengine-enterprise-server-2.4.0.7/" in this example, and execute the `install.sh` script. ```bash $ tar xvzf TDengine-enterprise-server-2.4.0.7-Linux-x64.tar.gz @@ -124,7 +124,7 @@ taoskeeper is installed, enable it by `systemctl enable taoskeeper` ``` :::info -Some configuration will be prompted for users to provide when install.sh is executing, the interactive mode can be disabled by executing `./install.sh -e no`. `./install -h` can show all parameters and detailed explanation. +Users will be prompted to enter some configuration information when install.sh is executing. The interactive mode can be disabled by executing `./install.sh -e no`. `./install.sh -h` can show all parameters with detailed explanation. ::: @@ -132,7 +132,7 @@ Some configuration will be prompted for users to provide when install.sh is exec :::note -When installing on the first node in the cluster, when "Enter FQDN:" is prompted, nothing needs to be provided. When installing on following nodes, when "Enter FQDN:" is prompted, the end point of the first dnode in the cluster can be input if it has been already up; or just ignore it and configure later after installation is done. +When installing on the first node in the cluster, at the "Enter FQDN:" prompt, nothing needs to be provided. When installing on subsequent nodes, at the "Enter FQDN:" prompt, you must enter the end point of the first dnode in the cluster if it is already up. You can also just ignore it and configure it later after installation is finished. ::: @@ -181,14 +181,14 @@ taosKeeper is removed successfully! :::note -- It's strongly suggested not to use multiple kinds of installation packages on single host TDengine -- After deb package is installed, if the installation directory is removed manually so that uninstall or reinstall can't succeed, it can be resolved by cleaning up TDengine package information as below command and then reinstalling. +- We strongly recommend not to use multiple kinds of TDengine installation packages on a single host. +- After deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed.
```bash $ sudo rm -f /var/lib/dpkg/info/tdengine* ``` -- After rpm package is installed, if the installation directory is removed manually so that uninstall or reinstall can't succeed, it can be resolved by cleaning up TDengine package information as below command and then reinstalling. +- After rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed. ```bash $ sudo rpm -e --noscripts tdengine @@ -219,7 +219,7 @@ lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/ During the installation process: - Configuration directory, data directory, and log directory are created automatically if they don't exist -The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg if not existing +- The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg - The default data directory is /var/lib/taos, which is a soft link to /usr/local/taos/data - The default log directory is /var/log/taos, which is a soft link to /usr/local/taos/log - The executables at /usr/local/taos/bin are linked to /usr/bin @@ -228,14 +228,14 @@ During the installation process: :::note -- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution because data can't be recovered once +- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution, because data can't be recovered. Please follow your data integrity, security and backup SOPs before deleting any data. - When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it will be kept and the configuration file in the installation package will be renamed to taos.cfg.orig and stored at /usr/local/taos/cfg to be used as configuration sample. Otherwise the configuration file in the installation package will be installed to /etc/taos/taos.cfg and used. ## Start and Stop -Linux system services `systemd`, `systemctl` or `service` is used to start, stop and restart TDengine. The server process of TDengine is `taosd`, which is started automatically after the Linux system is started. System operator can use `systemd`, `systemctl` or `service` to start, stop or restart TDengine server. +Linux system services `systemd`, `systemctl` or `service` are used to start, stop and restart TDengine. The server process of TDengine is `taosd`, which is started automatically after the Linux system is started. System operators can use `systemd`, `systemctl` or `service` to start, stop or restart TDengine server. -For example, if using `systemctl` , the commands to start, stop, restart and check TDengine server are as below: +For example, if using `systemctl`, the commands to start, stop, restart and check TDengine server are below: - Start server: `systemctl start taosd` @@ -263,20 +263,22 @@ Active: inactive (dead) There are two aspects in upgrade operation: upgrade installation package and upgrade a running server. -Upgrading package should follow the steps mentioned previously to firstly uninstall old version then install new version.
+To upgrade a package, follow the steps mentioned previously to first uninstall the old version then install the new version. -Upgrading a running server is much more complex. Firstly please check the version number of old version and new version. The version number of TDengine consists of 4 sections, only the first 3 section match can the old version be upgraded to the new version. The steps of upgrading a running server are as below: +Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections; only if the first 3 sections match can the old version be upgraded to the new version. The steps of upgrading a running server are as below: - Stop inserting data -- Make sure all data persisted into disk +- Make sure all data is persisted to disk +- Make some simple queries (such as counting the total rows in STables and tables; note down the values, and follow relevant SOPs) - Stop the cluster of TDengine - Uninstall old version and install new version - Start the cluster of TDengine -- Make some simple queries to make sure no data loss -- Make some simple data insertion to make sure the cluster works well -- Restore business data +- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss +- Run some simple data insertion statements to make sure the cluster works well +- Restore business services :::warning + TDengine doesn't guarantee any lower version is compatible with the data generated by a higher version, so it's never recommended to downgrade the version. ::: diff --git a/docs-en/13-operation/02-planning.mdx b/docs-en/13-operation/02-planning.mdx index 35a34aebc088c233ed9fc39723e8890ebc56e124..c1baf92dbfa8d93f83174c05c2ea631d1a469739 100644 --- a/docs-en/13-operation/02-planning.mdx +++ b/docs-en/13-operation/02-planning.mdx @@ -2,19 +2,19 @@ title: Resource Planning --- -The computing and storage resources need to be planned if using TDengine to build an IoT platform. How to plan the CPU, memory and disk required will be described in this chapter. +It is important to plan computing and storage resources if using TDengine to build an IoT, time-series or Big Data platform. How to plan the CPU, memory and disk resources required will be described in this chapter. ## Memory Requirement of Server Side -The number of vgroups created for each database is same as the number of CPU cores by default and can be configured by parameter `maxVgroupsPerDb`, each vnode in a vgroup stores one replica. Each vnode consumes fixed size of memory, i.e. `blocks` \* `cache`. Besides, some memory is required for tag values associated with each table. A fixed amount of memory is required for each cluster.
So, the memory required for each DB can be calculated using the formula below: ``` Database Memory Size = maxVgroupsPerDb * replica * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB) ``` -For example, assuming the default value of `maxVgroupPerDB` is 64, the default value of `cache` 16M, the default value of `blocks` is 6, there are 100,000 tables in a DB, the replica number is 1, total length of tag values is 256 bytes, the total memory required for this DB is: 64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6792M. +For example, assuming the default value of `maxVgroupPerDB` is 64, the default value of `cache` is 16M, the default value of `blocks` is 6, there are 100,000 tables in a DB, the replica number is 1, total length of tag values is 256 bytes, the total memory required for this DB is: 64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6792M. -In real operation of TDengine, we are more concerned about the memory used by each TDengine server process `taosd`. +In the real operation of TDengine, we are more concerned about the memory used by each TDengine server process `taosd`. ``` taosd_memory = vnode_memory + mnode_memory + query_memory ``` @@ -22,29 +22,29 @@ In the above formula: -1. "vnode_memory" of a `taosd` process is the memory used by all vnodes hosted by this `taosd` process. It can be roughly calculated by firstly adding up the total memory of all DBs whose memory usage can be derived according to the formula mentioned previously then dividing by number of dnodes and multiplying the number of replicas. +1. "vnode_memory" of a `taosd` process is the memory used by all vnodes hosted by this `taosd` process. It can be roughly calculated by firstly adding up the total memory of all DBs, whose memory usage can be derived according to the formula for Database Memory Size mentioned above, then dividing by the number of dnodes and multiplying by the number of replicas. ``` - vnode_memory = sum(Database memory) / number_of_dnodes \* replica + vnode_memory = (sum(Database Memory Size) / number_of_dnodes) * replica ``` 2. "mnode_memory" of a `taosd` process is the memory consumed by a mnode. If there is one (and only one) mnode hosted in a `taosd` process, the memory consumed by "mnode" is "0.2KB \* the total number of tables in the cluster". 3. "query_memory" is the memory used when processing query requests. Each ongoing query consumes at least "0.2 KB \* total number of involved tables". -Please be noted that the above formulas can only be used to estimate the minimum memory requirement, instead of maximum memory usage. In a real production environment, it's better to preserve some redundance beyond the estimated minimum memory requirement. If memory is abundant, it's suggested to increase the value of parameter `blocks` to speed up data insertion and data query. +Please note that the above formulas can only be used to estimate the minimum memory requirement, instead of maximum memory usage. In a real production environment, it's better to reserve some redundancy beyond the estimated minimum memory requirement. If memory is abundant, it's suggested to increase the value of parameter `blocks` to speed up data insertion and data query.
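Continuing the worked example (a rough sketch with assumed numbers: a cluster whose only database is the 6792M one above, spread across 2 dnodes with replica 1, and 100,000 tables in total):

```
vnode_memory = (6792M / 2) * 1 = 3396M
mnode_memory = 0.2KB * 100000 ≈ 20M
query_memory >= 0.2KB * number of tables involved in each ongoing query
taosd_memory ≈ 3396M + 20M + query_memory
```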
## Memory Requirement of Client Side -The client programs use TDengine client driver `taosc` to connect to the server side, there is also memory requirement for a client program. +For the client programs using TDengine client driver `taosc` to connect to the server side, there is a memory requirement as well. -The memory consumed by a client program is mainly about the SQL statements for data insertion, caching of table metadata, and some internal use. Assuming maximum number of tables is N (the memory consumed by the metadata of each table is 256 bytes), maximum number of threads for parallel insertion is T, maximum length of a SQL statement is S (normally 1 MB), the memory required by a client program can be estimated using below formula: +The memory consumed by a client program is mainly about the SQL statements for data insertion, caching of table metadata, and some internal use. Assuming the maximum number of tables is N (the memory consumed by the metadata of each table is 256 bytes), the maximum number of threads for parallel insertion is T, and the maximum length of a SQL statement is S (normally 1 MB), the memory required by a client program can be estimated using the below formula: ``` M = (T * S * 3 + (N / 4096) + 100) ``` -For example, if the number of parallel data insertion threads is 100, total number of tables is 10,000,000, then minimum memory requirement of a client program is: +For example, if the number of parallel data insertion threads is 100, and the total number of tables is 10,000,000, then the minimum memory requirement of a client program is: ``` 100 * 3 + (10000000 / 4096) + 100 = 2841 (MBytes) ``` @@ -56,10 +56,10 @@ So, at least 3GB needs to be reserved for such a client. The CPU resources required depend on two aspects: -- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The computing resource consumed between inserting 1 row one time and inserting 10 rows one time is very small. So, the more the rows to insert one time, the higher the efficiency. Inserting in bach also exposes requirement for the client side which needs to cache rows and insert in batch once the cached rows reaches a threshold. -- **Data Query** High efficiency query is provided in TDengine, but it's hard to estimate the CPU resource required because the queries used in different use cases and the frequency of queries vary significantly. It can only be verified with the query statements, query frequency, data size to be queried, etc provided by user. +- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The difference in computing resource consumed between inserting 1 row at a time and inserting 10 rows at a time is very small. So, the more rows that can be inserted in one request, the higher the efficiency (see the multi-row insert sketch after this section). Inserting in batch also imposes requirements on the client side, which needs to cache rows and insert them in batch once the number of cached rows reaches a threshold. +- **Data Query** High efficiency query is provided in TDengine, but it's hard to estimate the CPU resource required because the queries used in different use cases and the frequency of queries vary significantly. It can only be verified with the query statements, query frequency, data size to be queried, and other requirements provided by users. -In short words, the CPU resource required for data insertion can be estimated but it's hard to do so for query use cases. In real operation, it's suggested to control CPU usage below 50%.
If this threshold is exceeded, it's a reminder for system operator to add more nodes in the cluster to expand resources. +In short, the CPU resource required for data insertion can be estimated but it's hard to do so for query use cases. In real operation, it's suggested to control CPU usage below 50%. If this threshold is exceeded, it's a reminder for the system operator to add more nodes to the cluster to expand resources. ## Disk Requirement @@ -69,14 +69,14 @@ The compression ratio in TDengine is much higher than that in RDBMS. In most cas ``` Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable ``` -For example, there are 10,000,000 meters, while each meter collects data every 15 minutes and the data size of each collection si 128 bytes, so the raw data size of one year is: 10000000 \* 128 \* 24 \* 60 / 15 \* 365 = 44.8512(TB). Assuming compression ratio is 5, the actual disk size is: 44.851 / 5 = 8.97024(TB). +For example, if there are 10,000,000 meters, each meter collects data every 15 minutes, and the data size of each collection is 128 bytes, then the raw data size of one year is: 10000000 \* 128 \* 24 \* 60 / 15 \* 365 = 44.8512(TB). Assuming the compression ratio is 5, the actual disk size is: 44.8512 / 5 = 8.97024(TB). -Parameter `keep` can be used to set how long the data will be kept on disk. To further reduce storage cost, multiple storage levels can be enabled in TDengine, with the coldest data stored on the cheapest storage device, and this is transparent to application programs. +Parameter `keep` can be used to set how long the data will be kept on disk. To further reduce storage cost, multiple storage levels can be enabled in TDengine, with the coldest data stored on the cheapest storage device. This is completely transparent to application programs. -To increase the performance, multiple disks can be setup for parallel data reading or data inserting. Please be noted that expensive disk array is not necessary because replications are used in TDengine to provide high availability. +To increase performance, multiple disks can be set up for parallel data reading or data inserting. Please note that an expensive disk array is not necessary because replications are used in TDengine to provide high availability. ## Number of Hosts -A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulas mentioned previously. Then, according to the system resources that a single host can provide, assuming all hosts are same in resources, the number of hosts can be derived easily. +A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulae mentioned previously. Then, according to the system resources that a single host can provide, assuming all hosts have the same resources, the number of hosts can be derived easily. **Quick Estimation for CPU, Memory and Disk** Please refer to [Resource Estimate](https://www.taosdata.com/config/config.html).
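As noted in the Data Insertion paragraph above, batching rows into a single request is the main lever for insertion efficiency. A minimal sketch, reusing the sample meter table `d1001` (the timestamps and values are made up):

```sql
-- One request carrying 3 rows costs only slightly more than one carrying 1 row:
INSERT INTO d1001 VALUES
  ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000)
  ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000)
  ('2018-10-03 14:38:25.000', 12.30000, 221, 0.31000);
```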
diff --git a/docs-en/13-operation/03-tolerance.md b/docs-en/13-operation/03-tolerance.md index 367474cddb7395ea84a4a33623d1643e487f9d09..d4d48d7fcdc2c990b6ea0821e2347c70a809ed79 100644 --- a/docs-en/13-operation/03-tolerance.md +++ b/docs-en/13-operation/03-tolerance.md @@ -7,23 +7,26 @@ title: Fault Tolerance & Disaster Recovery TDengine uses **WAL**, i.e. Write Ahead Log, to achieve fault tolerance and high reliability. -When a data block is received by TDengine, the original data block is firstly written into WAL. The log in WAL will be deleted only after the data has been written into data files in the database. Data can be recovered from WAL in case the server is stopped abnormally due to any reason and then restarted. +When a data block is received by TDengine, the original data block is first written into WAL. The log in WAL will be deleted only after the data has been written into data files in the database. Data can be recovered from WAL in case the server is stopped abnormally for any reason and then restarted. There are 2 configuration parameters related to WAL: -- walLevel:0:wal is disabled; 1:wal is enabled without fsync; 2:wal is enabled with fsync. -- fsync:only valid when walLevel is set to 2, it specified the interval of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written. +- walLevel: + - 0: wal is disabled + - 1: wal is enabled without fsync + - 2: wal is enabled with fsync +- fsync: This parameter is only valid when walLevel is set to 2. It specifies the interval, in milliseconds, of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written. -To achieve absolutely no data loss, walLevel needs to be set to 2 and fsync needs to be set to 1. The penalty is the performance of data ingestion downgrades. However, if the concurrent threads of data insertion on the client side can reach a big enough number, for example 50, the data ingestion performance would be still good enough, our verification shows that the drop is only 30% compared to fsync is set to 3,000 milliseconds. +To achieve absolutely no data loss, walLevel should be set to 2 and fsync should be set to 1. There is a performance penalty to the data ingestion rate. However, if the concurrent data insertion threads on the client side can reach a big enough number, for example 50, the data ingestion performance will still be good enough. Our verification shows that the drop is only 30% when fsync is set to 3,000 milliseconds. ## Disaster Recovery -TDengine uses replications to provide high availability and disaster recovery capability. +TDengine uses replication to provide high availability and disaster recovery capability. -TDengine cluster is managed by mnode. To make sure the high availability of mnode, multiple replicas can be configured by system parameter `numOfMnodes`. The data replication between mnode replicas is in synchronous way to guarantee the metadata consistency. +A TDengine cluster is managed by mnode. To ensure the high availability of mnode, multiple replicas can be configured by the system parameter `numOfMnodes`. The data replication between mnode replicas is performed in a synchronous way to guarantee metadata consistency. -The number of replicas for time series data in TDengine is associated with each database, there can be a lot of databases in a cluster while each database can be configured with a different number of replicas. When creating a database, parameter `replica` is used to configure the number of replications. To achieve high availability, `replica` needs to be higher than 1. +The number of replicas for time series data in TDengine is associated with each database. There can be many databases in a cluster and each database can be configured with a different number of replicas. When creating a database, parameter `replica` is used to configure the number of replications. To achieve high availability, `replica` needs to be higher than 1.
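A compact sketch of how these settings come together at database creation time (assuming a cluster with at least 3 dnodes; `power` is a hypothetical database name, and the 3,000 ms fsync interval matches the verification figure above):

```sql
create database power replica 3 wal 2 fsync 3000;
```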
-The number of dnodes in a TDengine cluster must NOT be lower than the number of replicas for any database, otherwise it would fail when trying to create table. +The number of dnodes in a TDengine cluster must NOT be lower than the number of replicas for any database, otherwise it would fail when trying to create a table. -As long as the dnodes of a TDengine cluster are deployed on different physical machines and replica number is set to bigger than 1, high availability can be achieved without any other assistance. If dnodes of TDengine cluster are deployed in geographically different data centers, disaster recovery can be achieved too. +As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is higher than 1, high availability can be achieved without any other assistance. For disaster recovery, dnodes of a TDengine cluster should be deployed in geographically different data centers. diff --git a/docs-en/13-operation/06-admin.md b/docs-en/13-operation/06-admin.md index 1ca0dfeaf4a4b0b4c597e1a5ec6ece20224e2dba..458a91b88c6d8319fe8b84c2b34d8ff968957910 100644 --- a/docs-en/13-operation/06-admin.md +++ b/docs-en/13-operation/06-admin.md @@ -2,7 +2,7 @@ title: User Management --- -System operator can use TDengine CLI `taos` to create or remove user or change password. The SQL command is as low: +A system operator can use TDengine CLI `taos` to create or remove users or change passwords. The SQL commands are documented below: ## Create User @@ -10,7 +10,7 @@ System operator can use TDengine CLI `taos` to create or remove user or change p CREATE USER <user_name> PASS <'password'>; ``` -When creating a user and specifying the user name and password, password needs to be quoted using single quotes. +When creating a user and specifying the user name and password, the password needs to be quoted using single quotes. ## Drop User @@ -18,7 +18,7 @@ When creating a user and specifying the user name and password, password needs t DROP USER <user_name>; ``` -Drop a user can only be performed by root. +Dropping a user can only be performed by root. ## Change Password @@ -26,7 +26,7 @@ Drop a user can only be performed by root. ALTER USER <user_name> PASS <'password'>; ``` -To keep the case of the password when changing password, password needs to be quoted using single quotes. +To keep the case of the password when changing password, the password needs to be quoted using single quotes. ## Change Privilege @@ -36,7 +36,7 @@ ALTER USER <user_name> PRIVILEGE <write|read>; The privileges that can be changed to are `read` or `write` without single quotes. -Note:there is another privilege `super`, which not allowed to be authorized to any user. +Note: there is another privilege `super`, which is not allowed to be authorized to any user. ## Show Users @@ -45,6 +45,6 @@ SHOW USERS; ``` :::note -In SQL syntax, `< >` means the part that needs to be input by user, excluding the `< >` itself. +In SQL syntax, `< >` means the part that needs to be input by the user, excluding the `< >` itself. :::
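A short hypothetical walkthrough of these commands (run as root; `ops_reader` and both passwords are made-up values):

```sql
CREATE USER ops_reader PASS 'taos#2022';
ALTER USER ops_reader PRIVILEGE read;
ALTER USER ops_reader PASS 'taos#2023';
DROP USER ops_reader;
```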
diff --git a/docs-en/13-operation/07-import.md b/docs-en/13-operation/07-import.md index befca38652abadca60b62721754de7ab718f65ea..8362cec1ab3072866018678b42a679d0c19b49de 100644 --- a/docs-en/13-operation/07-import.md +++ b/docs-en/13-operation/07-import.md @@ -2,26 +2,26 @@ title: Data Import --- -There are multiple ways of importing data provided byTDengine: import with script, import from data file, import using `taosdump`. +There are multiple ways of importing data provided by TDengine: import with script, import from data file, import using `taosdump`. ## Import Using Script -TDengine CLI `taos` supports `source <filePath>` command for executing the SQL statements in the file in batch. The SQL statements for creating databases, creating tables, and inserting rows can be written in single file with one statement on each line, then the file can be executed using `source` command in TDengine CLI `taos` to execute the SQL statements in order and in batch. In the script file, any line beginning with "#" is treated as comments and ignored silently. +TDengine CLI `taos` supports the `source <filePath>` command for executing the SQL statements in the file in batch. The SQL statements for creating databases, creating tables, and inserting rows can be written in a single file with one statement on each line, then the file can be executed using the `source` command in TDengine CLI `taos` to execute the SQL statements in order and in batch. In the script file, any line beginning with "#" is treated as comments and ignored silently. ## Import from Data File -In TDengine CLI, data can be imported from a CSV file into an existing table. The data in single CSV must belong to same table and must be consistent with the schema of that table. The SQL statement is as below: +In TDengine CLI, data can be imported from a CSV file into an existing table. The data in a single CSV must belong to the same table and must be consistent with the schema of that table. The SQL statement is as below: ```sql insert into tb1 file 'path/data.csv'; ``` :::note -If there is description in the first line of a CSV file, please remove it before importing. If there is no value for a column, please use `NULL` without quotes. +If there is a description in the first line of the CSV file, please remove it before importing. If there is no value for a column, please use `NULL` without quotes. ::: -For example, there is a sub table d1001 whose schema is as below: +For example, there is a subtable d1001 whose schema is as below: ```sql taos> DESCRIBE d1001 @@ -49,7 +49,7 @@ The format of the CSV file to be imported, data.csv, is as below: '2018-10-12 06:38:05.000',18.30000,219,0.31000 ``` -Then, below SQL statement can be used to import data from file "data.csv", assuming the file is located under the home directory of current Linux user. +Then, the below SQL statement can be used to import data from file "data.csv", assuming the file is located under the home directory of the current Linux user. ```sql taos> insert into d1001 file '~/data.csv'; @@ -58,4 +58,4 @@ Query OK, 9 row(s) affected (0.004763s) ## Import using taosdump -A convenient tool for importing and exporting data is provided by TDengine, `taosdump`, which can used to export data from one TDengine cluster and import into another one. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump). +A convenient tool for importing and exporting data is provided by TDengine, `taosdump`, which can be used to export data from one TDengine cluster and import into another one. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump).
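As a sketch of the taosdump round trip (the flags shown are assumptions to be checked against `taosdump --help` for your version; the host names, the `./dump` directory and the `power` database are hypothetical):

```bash
# Export database "power" from the source cluster into ./dump
taosdump -h src.taosdata.com -D power -o ./dump

# Import the dumped data into the target cluster
taosdump -h dst.taosdata.com -i ./dump
```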
diff --git a/docs-en/13-operation/08-export.md b/docs-en/13-operation/08-export.md index fa9625a7c5f6b0e6706d726bff410cee647286bb..5780de42faeaedbc1c985ad2aa2f52fe56c76971 100644 --- a/docs-en/13-operation/08-export.md +++ b/docs-en/13-operation/08-export.md @@ -2,11 +2,13 @@ title: Data Export --- -There are two ways of exporting data from a TDengine cluster, one is SQL statement in TDengine CLI, the other one is `taosdump`. +There are two ways of exporting data from a TDengine cluster: +- Using a SQL statement in TDengine CLI +- Using the `taosdump` tool ## Export Using SQL -If you want to export the data of a table or a STable, please execute below SQL statement in TDengine CLI. +If you want to export the data of a table or a STable, please execute the SQL statement below, in the TDengine CLI. ```sql select * from <tb_name> >> data.csv; @@ -16,4 +18,4 @@ The data of table or STable specified by `tb_name` will be exported into a file ## Export Using taosdump -With `taosdump`, you can choose to export the data of all databases, a database, a table or a STable, you can also choose export the data within a time range, or even only export the schema definition of a table. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump). +With `taosdump`, you can choose to export the data of all databases, a database, a table or a STable; you can also choose to export the data within a time range, or even export only the schema definition of a table. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump). diff --git a/docs-en/13-operation/09-status.md b/docs-en/13-operation/09-status.md index 3f3c6c9f1e86f9f33bafc7edfd79bebb175871cc..51396524ea281ae665c9fdf61d2e6e6202995537 100644 --- a/docs-en/13-operation/09-status.md +++ b/docs-en/13-operation/09-status.md @@ -3,7 +3,7 @@ sidebar_label: Connections & Tasks title: Manage Connections and Query Tasks --- -System operator can use TDengine CLI to show the connections, ongoing queries, stream computing, and can close connection or stop ongoing query task or stream computing. +A system operator can use the TDengine CLI to show connections, ongoing queries, stream computing, and can close connections or stop ongoing query tasks or stream computing. ## Show Connections ```sql SHOW CONNECTIONS; ``` One column of the output of the above SQL command is "ip:port", which is the end point of the client. -## Close Connections Forcedly +## Force Close Connections ```sql KILL CONNECTION <connection-id>; ``` In the above SQL command, `connection-id` is from the first column of the output @@ -27,9 +27,9 @@ In the above SQL command, `connection-id` is from the first column of the output SHOW QUERIES; ``` -The first column of the output is query ID, which is composed of the corresponding connection ID and the sequence number of the current query task started on this connection, in format of "connection-id:query-no". +The first column of the output is query ID, which is composed of the corresponding connection ID and the sequence number of the current query task started on this connection. The format is "connection-id:query-no". -## Close Queries Forcedly +## Force Close Queries ```sql KILL QUERY <query-id>; ``` @@ -43,12 +43,12 @@ In the above SQL command, `query-id` is from the first column of the output of ` SHOW STREAMS; ``` -The first column of the output is stream ID, which is composed of the connection ID and the sequence number of the current stream started on this connection, in the format of "connection-id:stream-no".
+The first column of the output is stream ID, which is composed of the connection ID and the sequence number of the current stream started on this connection. The format is "connection-id:stream-no". -## Close Continuous Query Forcedly +## Force Close Continuous Query ```sql KILL STREAM <stream-id>; ``` -The the above SQL command, `stream-id` is from the first column of the output of `SHOW STREAMS`. +In the above SQL command, `stream-id` is from the first column of the output of `SHOW STREAMS`.
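Putting these management commands together, a brief hypothetical session (the IDs are made up; in practice they come from the first column of the corresponding SHOW output):

```sql
SHOW QUERIES;        -- suppose one row reports query ID 3:51
KILL QUERY 3:51;     -- stop that query ("connection-id:query-no")
SHOW CONNECTIONS;    -- suppose the offending connection ID is 3
KILL CONNECTION 3;   -- force close the connection itself
```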
diff --git a/docs-en/13-operation/10-monitor.md b/docs-en/13-operation/10-monitor.md index 019cf4f2948141fac79587429f1fdc3b06623945..a4679983f2bc77bb4e438f5d43fa1b8beb39b120 100644 --- a/docs-en/13-operation/10-monitor.md +++ b/docs-en/13-operation/10-monitor.md @@ -2,19 +2,19 @@ title: TDengine Monitoring --- -After TDengine is started, a database named `log` for monitoring is created automatically. The information about CPU, memory, disk, bandwidth, number of requests, disk I/O speed, slow query is written into `log` database on the basis of a predefined interval. Besides, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into `log` database too. System operator can view the data in `log` database from TDengine CLI or from a web console. +After TDengine is started, a database named `log` is created automatically to help with monitoring. Information that includes CPU, memory and disk usage, bandwidth, number of requests, disk I/O speed, and slow queries, is written into the `log` database at a predefined interval. Additionally, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into the `log` database too. A system operator can view the data in the `log` database from TDengine CLI or from a web console. -Collection of the monitoring information is enabled by default, but can be disabled by parameter `monitor` in configuration file. +The collection of the monitoring information is enabled by default, but can be disabled by parameter `monitor` in the configuration file. ## TDinsight -TDinsight is a total solution which uses the monitor database `log` mentioned previously and Grafana to monitor a TDengine cluster. +TDinsight is a complete solution which uses the monitoring database `log` mentioned previously, and Grafana, to monitor a TDengine cluster. From version 2.3.3.0, more monitoring data has been added in the `log` database. Please refer to [TDinsight Grafana Dashboard](https://grafana.com/grafana/dashboards/15167) to learn more details about using TDinsight to monitor TDengine. -A script `TDinsight.sh` is provided to deploy TDinsight in automatic way. +A script `TDinsight.sh` is provided to deploy TDinsight automatically. -Download `TDinsight.sh` with below command: +Download `TDinsight.sh` with the below command: ```bash wget https://github.com/taosdata/grafanaplugin/raw/master/dashboards/TDinsight.sh @@ -38,7 +38,7 @@ There are two ways to setup Grafana alert notification. sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid> ``` -- The AliCloud SMS alert built in TDengine data source plugin can be enabled with parameter `-s`, the parameters of this way are as follows: +- The AliCloud SMS alert built into the TDengine data source plugin can be enabled with the parameter `-s`. The parameters for enabling this plugin are listed below: - `-I`: AliCloud SMS Key ID - `-K`: AliCloud SMS Key Secret @@ -47,7 +47,7 @@ There are two ways to setup Grafana alert notification. - `-T`: Input parameters in JSON format for the SMS notification template, for example `{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}` - `-B`: List of mobile numbers to be notified - Below is an example of the full command using this way. + Below is an example of the full command using the AliCloud SMS alert. ```bash sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -s \ @@ -55,6 +55,6 @@ There are two ways to setup Grafana alert notification. -T '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}' ``` -Launch `TDinsight.sh` as above command and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`. +Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`. For more use cases and restrictions please refer to [TDinsight](/reference/tdinsight/). diff --git a/docs-en/13-operation/11-optimize.md b/docs-en/13-operation/11-optimize.md deleted file mode 100644 index 7cccfc8b0d51a4bfda9ae4827130a3747f10e649..0000000000000000000000000000000000000000 --- a/docs-en/13-operation/11-optimize.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: Performance Optimization --- - -After a TDengine cluster has been running for long enough time, because of updating data, deleting tables and deleting expired data, there may be fragments in data files and query performance may be impacted. To resolve the problem of fragments, from version 2.1.3.0 a new SQL command `COMPACT` can be used to defragment the data files. - -```sql -COMPACT VNODES IN (vg_id1, vg_id2, ...) -``` - -`COMPACT` can be used to defragment one or more vgroups. The defragmentation work will be put in task queue for scheduling execution by TDengine. `SHOW VGROUPS` command can be used to get the vgroup ids to be used in `COMPACT` command. There is a column `compacting` in the output of `SHOW GROUPS` to indicate the compacting status of the vgroup: 2 means the vgroup is waiting in task queue for compacting, 1 means compacting is in progress, and 0 means the vgroup has nothing to do with compacting. - -Please be noted that a lot of disk I/O is required for defragementation operation, during which the performance may be impacted significantly for data insertion and query, data insertion may be blocked shortly in extreme cases. - -## Optimize Storage Parameters - -The data in different use cases may have different characteristics, such as the days to keep, number of replicas, collection interval, record size, number of collection points, compression or not, etc. To achieve best efficiency in storage, the parameters in below table can be used, all of them can be either configured in `taos.cfg` as default configuration or in the command `create database`. For detailed definition of these parameters please refer to [Configuration Parameters](/reference/config/).
- -| # | Parameter | Unit | Definition | **Value Range** | **Default Value** | -| --- | --------- | ---- | ------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------- | ----------------- | -| 1 | days | Day | The time range of the data stored in a single data file | 1-3650 | 10 | -| 2 | keep | Day | The number of days the data is kept in the database | 1-36500 | 3650 | -| 3 | cache | MB | The size of each memory block | 1-128 | 16 | -| 4 | blocks | None | The number of memory blocks used by each vnode | 3-10000 | 6 | -| 5 | quorum | None | The number of required confirmation in case of multiple replicas | 1-2 | 1 | -| 6 | minRows | None | The minimum number of rows in a data file | 10-1000 | 100 | -| 7 | maxRows | None | The maximum number of rows in a daa file | 200-10000 | 4096 | -| 8 | comp | None | Whether to compress the data | 0:uncompressed; 1: One Phase compression; 2: Two Phase compression | 2 | -| 9 | walLevel | None | wal sync level (named as "wal" in create database ) | 1:wal enabled without fsync; 2:wal enabled with fsync | 1 | -| 10 | fsync | ms | The time to wait for invoking fsync when walLevel is set to 2; 0 means no wait | 3000 | -| 11 | replica | none | The number of replications | 1-3 | 1 | -| 12 | precision | none | Time precision | ms: millisecond; us: microsecond;ns: nanosecond | ms | -| 13 | update | none | Whether to allow updating data | 0: not allowed; 1: a row must be updated as whole; 2: a part of columns in a row can be updated | 0 | -| 14 | cacheLast | none | Whether the latest data of a table is cached in memory | 0: not cached; 1: the last row is cached; 2: the latest non-NULL value of each column is cached | 0 | - -For a specific use case, there may be multiple kinds of data with different characteristics, it's best to put data with same characteristics in same database. So there may be multiple databases in a system while each database can be configured with different storage parameters to achieve best performance. The above parameters can be used when creating a database to override the default setting in configuration file. - -```sql - CREATE DATABASE demo DAYS 10 CACHE 32 BLOCKS 8 REPLICA 3 UPDATE 1; -``` - -The above SQL statement creates a database named as `demo`, in which each data file stores data across 10 days, the size of each memory block is 32 MB and each vnode is allocated with 8 blocks, the replica is set to 3, update operation is allowed, and all other parameters not specified in the command follow the default configuration in `taos.cfg`. - -Once a database is created, only some parameters can be changed and be effective immediately while others are can't. 
- | **Parameter** | **Alterable** | **Value Range** | **Syntax** | | ------------- | ------------- | ---------------- | -------------------------------------- | | name | | | | | create time | | | | | ntables | | | | | vgroups | | | | | replica | **YES** | 1-3 | ALTER DATABASE REPLICA _n_ | | quorum | **YES** | 1-2 | ALTER DATABASE QUORUM _n_ | | days | | | | | keep | **YES** | days-365000 | ALTER DATABASE KEEP _n_ | | cache | | | | | blocks | **YES** | 3-1000 | ALTER DATABASE BLOCKS _n_ | | minrows | | | | | maxrows | | | | | wal | | | | | fsync | | | | | comp | **YES** | 0-2 | ALTER DATABASE COMP _n_ | | precision | | | | | status | | | | | update | | | | | cachelast | **YES** | 0 \| 1 \| 2 \| 3 | ALTER DATABASE CACHELAST _n_ | -**Explanation:** Prior to version 2.1.3.0, `taosd` server process needs to be restarted for these parameters to take in effect if they are changed using `ALTER DATABASE`. -When trying to join a new dnode into a running TDengine cluster, all the parameters related to cluster in the new dnode configuration must be consistent with the cluster, otherwise it can't join the cluster. The parameters that are checked when joining a dnode are as below. For detailed definition of these parameters please refer to [Configuration Parameters](/reference/config/). -- numOfMnodes -- mnodeEqualVnodeNum -- offlineThreshold -- statusInterval -- maxTablesPerVnode -- maxVgroupsPerDb -- arbitrator -- timezone -- balance -- flowctrl -- slaveQuery -- adjustMaster -For the convenience of debugging, the log setting of a dnode can be changed temporarily. The temporary change will be lost once the server is restarted. -```sql -ALTER DNODE <dnode_id> <config> -``` -- dnode_id: from output of "SHOW DNODES" -- config: the parameter to be changed, as below - - resetlog: close the old log file and create the new on - - debugFlag: 131 (INFO/ERROR/WARNING), 135 (DEBUG), 143 (TRACE) -For example - -``` -alter dnode 1 debugFlag 135; -``` diff --git a/docs-en/13-operation/17-diagnose.md b/docs-en/13-operation/17-diagnose.md index b140d925c07386f93c82d492bb8bcf4d95349f12..2b474fddba4af5ba0c29103cd8ab1249d10d055b 100644 --- a/docs-en/13-operation/17-diagnose.md +++ b/docs-en/13-operation/17-diagnose.md @@ -4,19 +4,19 @@ title: Problem Diagnostics ## Network Connection Diagnostics -When the client is unable to access the server, the network connection between the client side and the server side needs to be checked to find out the root cause and resolve problems. +When a TDengine client is unable to access a TDengine server, the network connection between the client side and the server side must be checked to find the root cause and resolve problems. -The diagnostic for network connection can be executed between Linux and Linux or between Linux and Windows. +Diagnostics for network connections can be executed between Linux and Linux or between Linux and Windows. Diagnostic steps: -1. If the port range to be diagnosed are being occupied by a `taosd` server process, please firstly stop `taosd. -2. On the server side, execute command `taos -n server -P <port> -l <pktlen>` to monitor the port range starting from the port specified by `-P` parameter with the role of "server. -3. On the client side, execute command `taos -n client -h <fqdn of server> -P <port> -l <pktlen>` to send testing package to the specified server and port. +1. If the port range to be diagnosed is being occupied by a `taosd` server process, please first stop `taosd`. +2.
On the server side, execute command `taos -n server -P <port> -l <pktlen>` to monitor the port range starting from the port specified by `-P` parameter with the role of "server". +3. On the client side, execute command `taos -n client -h <fqdn of server> -P <port> -l <pktlen>` to send a testing package to the specified server and port. --l <pktlen>: The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000. Please be noted that the package length must be same in the above 2 commands executed on server side and client side respectively. +-l <pktlen>: The size of the testing package, in bytes. The value range is [11, 64,000] and the default value is 1,000. Please note that the package length must be the same in the above 2 commands executed on the server side and client side respectively. -Output of the server side is as below for example: +Output of the server side for the example is below: ```bash # taos -n server -P 6000 @@ -47,7 +47,7 @@ Output of the server side is as below for example: 12/21 14:50:22.721261 0x7f53427ec700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6011 ``` -Output of the client side is as below for example: +Output of the client side for the example is below: ```bash # taos -n client -h 172.27.0.7 -P 6000 @@ -65,13 +65,13 @@ Output of the client side is as below for example: 12/21 14:50:22.721274 0x7fc95d859200 UTL successed to test UDP port:6011 ``` -The output needs to be checked carefully for the system operator to find out root cause and solve the problem. +The output needs to be checked carefully for the system operator to find the root cause and resolve the problem. ## Startup Status and RPC Diagnostic -`taos -n startup -h <fqdn of server>` can be used to check the startup status of a `taosd` process. This is a comman task for a system operator to do to determine whether `taosd` has been started successfully, especially in case of cluster. +`taos -n startup -h <fqdn of server>` can be used to check the startup status of a `taosd` process. This is a common task which should be performed by a system operator, especially in the case of a cluster, to determine whether `taosd` has been started successfully. -`taos -n rpc -h <fqdn of server>` can be used to check whether the port of a started `taosd` can be accessed or not. If `taosd` process doesn't respond or work abnormally, this command can be used to initiate a rpc communication with the specified fqdn to determine whether it's network problem or `taosd` is abnormal. +`taos -n rpc -h <fqdn of server>` can be used to check whether the port of a started `taosd` can be accessed or not. If the `taosd` process doesn't respond or is working abnormally, this command can be used to initiate a rpc communication with the specified fqdn to determine whether it's a network problem or whether `taosd` is abnormal. ## Sync and Arbitrator Diagnostic ``` taos -n sync -P 6040 -h <fqdn of server> taos -n sync -P 6042 -h <fqdn of server> ``` -The above commands can be executed on Linux Shell to check whether the port for sync works well and whether the sync module of the server side works well. Besides, `-P 6042` is used to check whether the arbitrator is configured properly and works well. +The above commands can be executed in a Linux shell to check whether the port for sync is working well and whether the sync module on the server side is working well. Additionally, `-P 6042` is used to check whether the arbitrator is configured properly and is working well.
## Network Speed Diagnostic `taos -n speed -h <fqdn of server> -P 6030 -N 10 -l 10000000 -S TCP` -From version 2.2.0.0, the above command can be executed on Linux Shell to test the network speed, it sends uncompressed package to a running `taosd` server process or a simulated server process started by `taos -n server` to test the network speed. Parameters can be used when testing network speed are as below: +From version 2.2.0.0 onwards, the above command can be executed in a Linux shell to test network speed. The command sends uncompressed packages to a running `taosd` server process or a simulated server process started by `taos -n server` to test the network speed. The parameters that can be used when testing network speed are as below: --n:When set to "speed", it means testing network speed --h:The FQDN or IP of the server process to be connected to; if not set, the FQDN configured in `taos.cfg` is used --P:The port of the server process to connect to, the default value is 6030 --N:The number of packages that will be sent in the test, range is [1,10000], default value is 100 --l:The size of each package in bytes, range is [1024, 1024 \* 1024 \* 1024], default value is 1024 --S:The type of network packages to send, can be either TCP or UDP, default value is +-n: When set to "speed", it means testing network speed. +-h: The FQDN or IP of the server process to be connected to; if not set, the FQDN configured in `taos.cfg` is used. +-P: The port of the server process to connect to, the default value is 6030. +-N: The number of packages that will be sent in the test, range is [1,10000], default value is 100. +-l: The size of each package in bytes, range is [1024, 1024 \* 1024 \* 1024], default value is 1024. +-S: The type of network packages to send, can be either TCP or UDP, default value is TCP. ## FQDN Resolution Diagnostic `taos -n fqdn -h <fqdn>` -From version 2.2.0.0, the above command can be executed on Linux Shell to test the resolution speed of FQDN. It can be used to try to resolve a FQDN to an IP address and record the time spent in this process. The parameters that can be used for this purpose are as below: +From version 2.2.0.0 onward, the above command can be executed in a Linux shell to test the resolution speed of FQDN. It can be used to try to resolve a FQDN to an IP address and record the time spent in this process. The parameters that can be used for this purpose are as below: --n:When set to "fqdn", it means testing the speed of resolving FQDN --h:The FQDN to be resolved. If not set, the `FQDN` parameter in `taos.cfg` is used by default. +-n: When set to "fqdn", it means testing the speed of resolving FQDN. +-h: The FQDN to be resolved. If not set, the `FQDN` parameter in `taos.cfg` is used by default. ## Server Log -The parameter `debugFlag` is used to control the log level of `taosd` server process. The default value is 131, for debug purpose it needs to be escalated to 135 or 143. +The parameter `debugFlag` is used to control the log level of the `taosd` server process. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively.
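For instance, a minimal sketch of raising the server log level in `taos.cfg` for a debugging session (the value is an assumption for this example; restore the default of 131 and restart `taosd` when done):

```
# /etc/taos/taos.cfg: set taosd log level to DEBUG
debugFlag 135
```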
+Once this parameter is set to 135 or 143, the log file grows very quickly, especially when there is a huge volume of data insertion and data query requests. If all the logs are stored together, some important information may easily be missed, so on the server side important information is stored in a different place from other logs. - Logs at the INFO, WARNING and ERROR levels are stored in `taosinfo` so that it is easy to find important information - Logs at the DEBUG (135) and TRACE (143) levels and other information not handled by `taosinfo` are stored in `taosdlog` ## Client Log -An independent log file, named as "taoslog+" is generated for each client program, i.e. a client process. The default value of `debugFlag` is also 131 and only log at level of INFO/ERROR/WARNING is recorded, it and needs to be changed to 135 or 143 so that log at DEBUG or TRACE level can be recorded for debugging purpose. +An independent log file, named "taoslog+", is generated for each client program, i.e. a client process. The default value of `debugFlag` is also 131 and only logs at the INFO/ERROR/WARNING levels are recorded. As stated above, for debugging and tracing, it needs to be changed to 135 or 143 respectively, so that logs at the DEBUG or TRACE level can be recorded. The maximum length of a single log file is controlled by parameter `numOfLogLines` and only 2 log files are kept for each `taosd` server process. -log file is written in async way to minimize the workload on disk, bu the penalty is that a few log lines may be lost in some extreme conditions. +Log files are written asynchronously to minimize the workload on disk, but the trade-off for this performance is that a few log lines may be lost in some extreme conditions. diff --git a/docs-en/13-operation/index.md b/docs-en/13-operation/index.md index a9801c0390f294d6b39b1219cc4055149871ef9c..c64749c40e26f091e4a25e0238827ebceff4b069 100644 --- a/docs-en/13-operation/index.md +++ b/docs-en/13-operation/index.md @@ -2,7 +2,7 @@ title: Administration --- -This chapter is mainly written for system administrators, covering download, install/uninstall, data import/export, system monitoring, user management, connection management, etc. Capacity planning and system optimization are also covered. +This chapter is mainly written for system administrators. It covers download, install/uninstall, data import/export, system monitoring, user management, connection management, capacity planning and system optimization. ```mdx-code-block import DocCardList from '@theme/DocCardList'; diff --git a/docs-en/14-reference/02-rest-api/02-rest-api.mdx b/docs-en/14-reference/02-rest-api/02-rest-api.mdx index f405d551e530a37a5221e71a824f605fba0c0db9..990af861961e9daf4ac775462e21d6d9852d17c1 100644 --- a/docs-en/14-reference/02-rest-api/02-rest-api.mdx +++ b/docs-en/14-reference/02-rest-api/02-rest-api.mdx @@ -2,23 +2,23 @@ title: REST API --- -To support the development of various types of platforms, TDengine provides an API that conforms to the REST principle, namely REST API. To minimize the learning cost, different from the other database REST APIs, TDengine directly requests the SQL command contained in the request BODY through HTTP POST to operate the database and only requires a URL. +To support the development of various types of applications and platforms, TDengine provides an API that conforms to REST principles, namely the REST API.
To minimize the learning cost, unlike REST APIs for other database engines, TDengine allows insertion of SQL commands in the BODY of an HTTP POST request to operate the database. :::note -One difference from the native connector is that the REST interface is stateless, so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name prefix. (Since version 2.2.0.0, it is supported to specify db_name in RESTful URL. If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used. Since version 2.4.0.0, REST service is provided by taosAdapter by default. And it requires that the `db_name` must be specified in the URL.) +One difference from the native connector is that the REST interface is stateless and so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name in the prefix. (Since version 2.2.0.0, TDengine supports specification of the db_name in RESTful URL. If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used. Since version 2.4.0.0, REST service is provided by taosAdapter by default and it requires that the `db_name` must be specified in the URL.) ::: ## Installation -The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language supports the HTTP protocol is enough. +The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language only needs to support the HTTP protocol. ## Verification If the TDengine server is already installed, it can be verified as follows: -The following is an Ubuntu environment using the `curl` tool (to confirm that it is installed) to verify that the REST interface is working. +The following example is in an Ubuntu environment and uses the `curl` tool to verify that the REST interface is working. Note that the `curl` tool may need to be installed in your environment. -The following example lists all databases, replacing `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number. +The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number. ```html curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taosdata.com:6041/rest/sql @@ -89,7 +89,7 @@ For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:60 TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication. -- The custom authentication information is as follows (Let's introduce token later) +- The custom authentication information is as follows. More details about "token" later. ``` Authorization: Taosd @@ -136,7 +136,7 @@ The return result is in JSON format, as follows: Description: -- status: tell if the operation result is success or failure. +- status: tells you whether the operation result was a success or a failure. - head: the definition of the table, or just one column "affected_rows" if no result set is returned.
(As of version 2.0.17.0, it is recommended not to rely on the head return value to determine the data column type but rather use column_meta. In later versions, the head item may be removed from the return value.) - column_meta: this item is added to the return value to indicate the data type of each column in the data with version 2.0.17.0 and later versions. Each column is described by three values: column name, column type, and type length. For example, `["current",6,4]` means that the column name is "current", the column type is 6, which is the float type, and the type length is 4, meaning the float type occupies 4 bytes. If the column type is binary or nchar, the type length indicates the maximum length of content stored in the column, not the length of the specific data in this return value. When the column type is nchar, the type length indicates the number of Unicode characters that can be saved, not bytes. - data: The exact data returned, presented row by row, or just [[affected_rows]] if no result set is returned. The order of the data columns in each row of data is the same as that of the data columns described in column_meta. diff --git a/docs-en/14-reference/03-connector/03-connector.mdx b/docs-en/14-reference/03-connector/03-connector.mdx index 6be914bdb4b701f478b6b8b27366d6ebb5a39ec8..44685579005c2cebd5e0194a10d457cd1199051e 100644 --- a/docs-en/14-reference/03-connector/03-connector.mdx +++ b/docs-en/14-reference/03-connector/03-connector.mdx @@ -4,7 +4,7 @@ title: Connector TDengine provides a rich set of APIs (application development interface). To facilitate users to develop their applications quickly, TDengine supports connectors for multiple programming languages, including official connectors for C/C++, Java, Python, Go, Node.js, C#, and Rust. These connectors support connecting to TDengine clusters using both native interfaces (taosc) and REST interfaces (not supported in a few languages yet). Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector. -![image-connector](/img/connector.png) +![TDengine Database image-connector](./connector.webp) ## Supported platforms diff --git a/docs-en/14-reference/03-connector/connector.webp b/docs-en/14-reference/03-connector/connector.webp new file mode 100644 index 0000000000000000000000000000000000000000..040cf5c26c726b345b2e0e5363dd3c677bec61be Binary files /dev/null and b/docs-en/14-reference/03-connector/connector.webp differ diff --git a/docs-en/14-reference/03-connector/csharp.mdx b/docs-en/14-reference/03-connector/csharp.mdx index ca4b1b9ecea84a7c05e3c9da77f1b44545d89081..1b0748fbd0d769b949d7eeb3fc72463539cc7564 100644 --- a/docs-en/14-reference/03-connector/csharp.mdx +++ b/docs-en/14-reference/03-connector/csharp.mdx @@ -19,7 +19,7 @@ import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx" `TDengine.Connector` is a C# language connector provided by TDengine that allows C# developers to develop C# applications that access TDengine cluster data. -The `TDengine.Connector` connector supports connect to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc. The `TDengine.Connector` currently does not provide a REST connection interface. Developers can write their RESTful application by referring to the [RESTful APIs](https://docs.taosdata.com//reference/restful-api/) documentation.
+The `TDengine.Connector` connector supports connecting to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc. The `TDengine.Connector` currently does not provide a REST connection interface. Developers can write their RESTful application by referring to the [REST API](/reference/rest-api/) documentation. This article describes how to install `TDengine.Connector` in a Linux or Windows environment and connect to TDengine clusters via `TDengine.Connector` to perform basic operations such as data writing and querying. @@ -179,9 +179,9 @@ namespace TDengineExample 1. "Unable to establish connection", "Unable to resolve FQDN" - Usually, it cause by the FQDN configuration is incorrect, you can refer to [How to understand TDengine's FQDN (Chinese)](https://www.taosdata.com/blog/2021/07/29/2741.html) to solve it. 2. + Usually, this is caused by an incorrect FQDN configuration. You can refer to [How to understand TDengine's FQDN (Chinese)](https://www.taosdata.com/blog/2021/07/29/2741.html) to solve it. -Unhandled exception. System.DllNotFoundException: Unable to load DLL 'taos' or one of its dependencies: The specified module cannot be found. +2. Unhandled exception. System.DllNotFoundException: Unable to load DLL 'taos' or one of its dependencies: The specified module cannot be found. This is usually because the program did not find the dependent client driver. The solution is to copy `C:\TDengine\driver\taos.dll` to the `C:\Windows\System32\` directory on Windows; on Linux, creating the soft link `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` will work. diff --git a/docs-en/14-reference/03-connector/go.mdx b/docs-en/14-reference/03-connector/go.mdx index fd5930f07ff7184bd8dd5ff19cd3860f9718eaf9..c1e85ae4eb1d1d7ccfb70b2b4f38cebaf6cbf06c 100644 --- a/docs-en/14-reference/03-connector/go.mdx +++ b/docs-en/14-reference/03-connector/go.mdx @@ -15,9 +15,9 @@ import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.md import GoOpenTSDBJson from "../../07-develop/03-insert-data/_go_opts_json.mdx" import GoQuery from "../../07-develop/04-query-data/_go.mdx" -`driver-go` is the official Go language connector for TDengine, which implements the interface to the Go language [database/sql](https://golang.org/pkg/database/sql/) package. Go developers can use it to develop applications that access TDengine cluster data. +`driver-go` is the official Go language connector for TDengine. It implements the [database/sql](https://golang.org/pkg/database/sql/) package, the generic Go language interface to SQL databases. Go developers can use it to develop applications that access TDengine cluster data. -`driver-go` provides two ways to establish connections. One is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. The other is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The set of features implemented by the REST connection differs slightly from the native connection. +`driver-go` provides two ways to establish connections. One is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface.
The other is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The set of features implemented by the REST connection differs slightly from those implemented by the native connection. This article describes how to install `driver-go` and connect to TDengine clusters and perform basic operations such as data query and data writing through `driver-go`. @@ -213,7 +213,7 @@ func main() { Since the REST interface is stateless, the `use db` syntax will not work. You need to put the db name into the SQL command, e.g. change `create table if not exists tb1 (ts timestamp, a int)` to `create table if not exists test.tb1 (ts timestamp, a int)`, otherwise it will report the error `[0x217] Database not specified or available`. -You can also put the db name in the DSN by changing `root:taosdata@http(localhost:6041)/` to `root:taosdata@http(localhost:6041)/test`. This method is supported by taosAdapter in TDengine 2.4.0.5. is supported since TDengine 2.4.0.5. Executing the `create database` statement when the specified db does not exist will not report an error while executing other queries or writing against that db will report an error. +You can also put the db name in the DSN by changing `root:taosdata@http(localhost:6041)/` to `root:taosdata@http(localhost:6041)/test`. This method is supported by taosAdapter since TDengine 2.4.0.5. Executing the `create database` statement when the specified db does not exist will not report an error, while executing other queries or writing against that db will report an error. The complete example is as follows. @@ -289,7 +289,7 @@ func main() { 6. `readBufferSize` parameter has no significant effect after being increased - If you increase `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is smaller, modifying this parameter will not improve significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value according to the actual situation to achieve the best query result. + Increasing `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is small, modifying this parameter will not improve performance significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value based on the actual situation to achieve the best query performance. 7. `disableCompression` parameter is set to `false` when the query efficiency is reduced diff --git a/docs-en/14-reference/03-connector/java.mdx b/docs-en/14-reference/03-connector/java.mdx index 328907c4d781bdea8d30623e01d431cedbf8d0fa..33d715c2e218fd6db4f61882f2a7a92baa80f5a2 100644 --- a/docs-en/14-reference/03-connector/java.mdx +++ b/docs-en/14-reference/03-connector/java.mdx @@ -9,19 +9,19 @@ description: TDengine Java based on JDBC API and provide both native and REST co import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -'taos-jdbcdriver' is TDengine's official Java language connector, which allows Java developers to develop applications that access the TDengine database. 'taos-jdbcdriver' implements the interface of the JDBC driver standard and provides two forms of connectors.
One is to connect to a TDengine instance natively through the TDengine client driver (taosc), which supports functions including data writing, querying, subscription, schemaless writing, and bind interface. And the other is to connect to a TDengine instance through the REST interface provided by taosAdapter (2.4.0.0 and later). REST connections implement has a slight differences to compare the set of features implemented and native connections. +'taos-jdbcdriver' is TDengine's official Java language connector, which allows Java developers to develop applications that access the TDengine database. 'taos-jdbcdriver' implements the interface of the JDBC driver standard and provides two forms of connectors. One is to connect to a TDengine instance natively through the TDengine client driver (taosc), which supports functions including data writing, querying, subscription, schemaless writing, and bind interface. And the other is to connect to a TDengine instance through the REST interface provided by taosAdapter (2.4.0.0 and later). The implementations of the REST connection and the native connection have slight differences in features. -![tdengine-connector](tdengine-jdbc-connector.png) +![TDengine Database tdengine-connector](tdengine-jdbc-connector.webp) The preceding diagram shows two ways for a Java app to access TDengine via connector: - JDBC native connection: Java applications use TSDBDriver on physical node 1 (pnode1) to call the client driver (`libtaos.so` or `taos.dll`) APIs directly to send writing and query requests to taosd instances located on physical node 2 (pnode2). -- JDBC REST connection: The Java application encapsulates the SQL as a REST request via RestfulDriver, sends it to the REST server of physical node 2 (taosAdapter), requests TDengine server through the REST server, and returns the result. +- JDBC REST connection: The Java application encapsulates the SQL as a REST request via RestfulDriver and sends it to the REST server (taosAdapter) on physical node 2. taosAdapter forwards the request to the TDengine server and returns the result. -Using REST connection, which does not rely on TDengine client drivers.It can be cross-platform more convenient and flexible but introduce about 30% lower performance than native connection. +The REST connection, which does not rely on TDengine client drivers, is more convenient and flexible, in addition to being cross-platform. However, the performance is about 30% lower than that of the native connection. :::info -TDengine's JDBC driver implementation is as consistent as possible with the relational database driver. Still, there are differences in the use scenarios and technical characteristics of TDengine and relational object databases, so 'taos-jdbcdriver' also has some differences from traditional JDBC drivers. You need to pay attention to the following points when using: +TDengine's JDBC driver implementation is as consistent as possible with the relational database driver. Still, there are differences in the use scenarios and technical characteristics of TDengine and relational object databases. So 'taos-jdbcdriver' also has some differences from traditional JDBC drivers. It is important to keep the following points in mind: - TDengine does not currently support delete operations for individual data records. - Transactional operations are not currently supported.
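To make the second connection form concrete, the sketch below opens a JDBC REST connection. The driver class name, the `jdbc:TAOS-RS://` URL scheme, and the `taosdemo.com:6041` host, `test` database, and `root`/`taosdata` credentials all come from the REST connection example discussed below; everything else is illustrative scaffolding, not a complete program template.

```java
import java.sql.Connection;
import java.sql.DriverManager;

public class RestConnectExample {
    public static void main(String[] args) throws Exception {
        // JDBC REST connection: RestfulDriver plus a jdbc:TAOS-RS:// URL;
        // no native client driver is needed on the application host.
        Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
        String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
            System.out.println("connected: " + !conn.isClosed());
        }
    }
}
```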
@@ -88,7 +88,7 @@ Add following dependency in the `pom.xml` file of your Maven project: -You can build Java connector from source code after clone TDengine project: +You can build the Java connector from source code after cloning the TDengine project: ```shell git clone https://github.com/taosdata/TDengine.git @@ -96,7 +96,7 @@ cd TDengine/src/connector/jdbc mvn clean install -Dmaven.test.skip=true ``` -After compilation, a jar package of taos-jdbcdriver-2.0.XX-dist .jar is generated in the target directory, and the compiled jar file is automatically placed in the local Maven repository. +After compilation, a jar package named taos-jdbcdriver-2.0.XX-dist.jar is generated in the target directory, and the compiled jar file is automatically placed in the local Maven repository. @@ -186,7 +186,7 @@ Connection conn = DriverManager.getConnection(jdbcUrl); In the above example, a RestfulDriver with a JDBC REST connection is used to establish a connection to a database named `test` with hostname `taosdemo.com` on port `6041`. The URL specifies the user name as `root` and the password as `taosdata`. -There is no dependency on the client driver when Using a JDBC REST connection. Compared to a JDBC native connection, only the following are required: 1. +There is no dependency on the client driver when using a JDBC REST connection. Compared to a JDBC native connection, only the following are required: 1. driverClass specified as "com.taosdata.jdbc.rs.RestfulDriver". 2. jdbcUrl starting with "jdbc:TAOS-RS://". @@ -206,10 +206,10 @@ The configuration parameters in the URL are as follows. - Unlike the native connection method, the REST interface is stateless. When using the JDBC REST connection, you need to specify the database name of the table and super table in SQL. For example. ```sql -INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6); +INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6); ``` -- Starting from taos-jdbcdriver-2.0.36 and TDengine 2.2.0.0, if dbname is specified in the URL, JDBC REST connections will use `/rest/sql/dbname` as the URL for REST requests by default, and there is no need to specify dbname in SQL. For example, if the URL is `jdbc:TAOS-RS://127.0.0.1:6041/test`, then the SQL can be executed: insert into t1 using weather(ts, temperature) tags('beijing') values(now, 24.6); +- Starting from taos-jdbcdriver-2.0.36 and TDengine 2.2.0.0, if dbname is specified in the URL, JDBC REST connections will use `/rest/sql/dbname` as the URL for REST requests by default, and there is no need to specify dbname in SQL. For example, if the URL is `jdbc:TAOS-RS://127.0.0.1:6041/test`, then the SQL can be executed: insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6); ::: @@ -271,7 +271,7 @@ If the configuration parameters are duplicated in the URL, Properties, or client 2. Properties connProps 3. the configuration file taos.cfg of the TDengine client driver when using a native connection -For example, if you specify the password as `taosdata` in the URL and specify the password as `taosdemo` in the Properties simultaneously. In this case, JDBC will use the password in the URL to establish the connection. +For example, if you specify the password as `taosdata` in the URL and specify the password as `taosdemo` in the Properties simultaneously, JDBC will use the password in the URL to establish the connection.
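As a quick illustration of the precedence rule above, the sketch below supplies conflicting passwords in the URL and in the Properties; per the order listed, the URL value wins. The property key name `"password"` is an assumption here, not taken from the original text.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class PrecedenceExample {
    public static void main(String[] args) throws Exception {
        // The URL says "taosdata"; the Properties say "taosdemo".
        String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/test?user=root&password=taosdata";
        Properties connProps = new Properties();
        connProps.setProperty("password", "taosdemo"); // assumed key name; superseded by the URL value
        try (Connection conn = DriverManager.getConnection(jdbcUrl, connProps)) {
            // The connection is established with the URL password "taosdata".
        }
    }
}
```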
## Usage examples @@ -323,7 +323,7 @@ while(resultSet.next()){ } ``` -> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, starting from 1, it is recommended to use the field names to get them. +> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set. ### Handling exceptions @@ -565,7 +565,7 @@ public class ParameterBindingDemo { // set table name pstmt.setTableName("t5_" + i); // set tags - pstmt.setTagNString(0, "Beijing-abc"); + pstmt.setTagNString(0, "California-abc"); // set columns ArrayList tsList = new ArrayList<>(); @@ -576,7 +576,7 @@ public class ParameterBindingDemo { ArrayList f1List = new ArrayList<>(); for (int j = 0; j < numOfRow; j++) { - f1List.add("Beijing-abc"); + f1List.add("California-abc"); } pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE); @@ -623,7 +623,7 @@ public void setNString(int columnIndex, ArrayList list, int size) throws ### Schemaless Writing -Starting with version 2.2.0.0, TDengine has added the ability to schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. See [schemaless writing](/reference/schemaless/) for details. +Starting with version 2.2.0.0, TDengine has added the ability to perform schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. See [schemaless writing](/reference/schemaless/) for details. **Note**. @@ -635,7 +635,7 @@ public class SchemalessInsertTest { private static final String host = "127.0.0.1"; private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000"; private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0"; - private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1346846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"Beijing\", \"id\": \"d1001\"}}"; + private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1346846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"; public static void main(String[] args) throws SQLException { final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; @@ -666,16 +666,16 @@ The TDengine Java Connector supports subscription functionality with the followi #### Create subscriptions ```java -TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false); +TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topicname", "select * from meters", false); ``` The three parameters of the `subscribe()` method have the following meanings. -- topic: the subscribed topic (i.e., name). This parameter is the unique identifier of the subscription -- sql: the query statement of the subscription, this statement can only be `select` statement, only the original data should be queried, and you can query only the data in the positive time order +- topicname: the name of the subscribed topic. This parameter is the unique identifier of the subscription. +- sql: the query statement of the subscription. This statement can only be a `select` statement. 
Only original data can be queried, and you can query the data only in temporal order. - restart: if the subscription already exists, whether to restart or continue the previous subscription -The above example will use the SQL command `select * from meters` to create a subscription named `topic`. If the subscription exists, it will continue the progress of the previous query instead of consuming all the data from the beginning. +The above example will use the SQL command `select * from meters` to create a subscription named `topicname`. If the subscription exists, it will continue the progress of the previous query instead of consuming all the data from the beginning. #### Subscribe to consume data diff --git a/docs-en/14-reference/03-connector/node.mdx b/docs-en/14-reference/03-connector/node.mdx index 48f724426a96e62e5b56ab4285e5c5fabc95c765..0774c35d626dcde16c1a834677da5aacf07a392e 100644 --- a/docs-en/14-reference/03-connector/node.mdx +++ b/docs-en/14-reference/03-connector/node.mdx @@ -14,7 +14,6 @@ import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx"; import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx"; import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx"; import NodeQuery from "../../07-develop/04-query-data/_js.mdx"; -import NodeAsyncQuery from "../../07-develop/04-query-data/_js_async.mdx"; `td2.0-connector` and `td2.0-rest-connector` are the official Node.js language connectors for TDengine. Node.js developers can develop applications to access TDengine instance data. @@ -78,7 +77,7 @@ Manually install the following tools. - Install [Python](https://www.python.org/downloads/) 2.7 (`v3.x.x` is not supported) and execute `npm config set python python2.7`. - Go to the `cmd` command-line interface, `npm config set msvs_version 2017` -Refer to Microsoft's Node.js User Manual [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows- environment. md#compiling-native-addon-modules). +Refer to Microsoft's Node.js User Manual [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules). If using ARM64 Node.js on Windows 10 ARM, you must add "Visual C++ compilers and libraries for ARM64" and "Visual C++ ATL for ARM64". @@ -189,14 +188,8 @@ let cursor = conn.cursor(); ### Query data -#### Synchronous queries - -#### asynchronous query - - - ## More Sample Programs | Sample Programs | Sample Program Description | diff --git a/docs-en/14-reference/03-connector/python.mdx b/docs-en/14-reference/03-connector/python.mdx index 2b238173e04e3e13de36b5ac4d91d0cda290ca72..69eec2388d460754493d2b775f14ab4bbf129799 100644 --- a/docs-en/14-reference/03-connector/python.mdx +++ b/docs-en/14-reference/03-connector/python.mdx @@ -11,18 +11,18 @@ import TabItem from "@theme/TabItem"; `taospy` is the official Python connector for TDengine. `taospy` provides a rich set of APIs that makes it easy for Python applications to access TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively. In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/).
It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/). -The connection to the server directly using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection". +The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection". The source code for the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-python). ## Supported Platforms -- The native connection [supported platforms](/reference/connector/#supported-platforms) is the same as the one supported by the TDengine client. +- The [supported platforms](/reference/connector/#supported-platforms) for the native connection are the same as the ones supported by the TDengine client. - REST connections are supported on all platforms that can run Python. ## Version selection -We recommend using the latest version of `taospy`, regardless what the version of TDengine is. +We recommend using the latest version of `taospy`, regardless of the version of TDengine. ## Supported features @@ -53,7 +53,7 @@ Earlier TDengine client software includes the Python connector. If the Python co ::: -#### to install `taospy` +#### To install `taospy` @@ -139,7 +139,7 @@ The FQDN above can be the FQDN of any dnode in the cluster, and the PORT is the -For REST connections and making sure the cluster is up, make sure the taosAdapter component is up. This can be tested using the following `curl ` command. +For REST connections, make sure the cluster and the taosAdapter component are running. This can be tested using the following `curl` command. ``` curl -u root:taosdata http://:/rest/sql -d "select server_version()" @@ -312,7 +312,7 @@ For a more detailed description of the `sql()` method, please refer to [RestClie ### Exception handling -All database operations will be thrown directly if an exception occurs. The application is responsible for exception handling. For example: +All errors from database operations are thrown directly as exceptions and the error message from the database is passed up the exception stack. The application is responsible for exception handling. For example: ```python {{#include docs-examples/python/handle_exception.py}} @@ -320,7 +320,7 @@ All database operations will be thrown directly if an exception occurs. The appl ### About nanoseconds -Due to the current imperfection of Python's nanosecond support (see link below), the current implementation returns integers at nanosecond precision instead of the `datetime` type produced by `ms and `us`, which application developers will need to handle on their own. And it is recommended to use pandas' to_datetime(). The Python Connector may modify the interface in the future if Python officially supports nanoseconds in full. +Due to the current imperfection of Python's nanosecond support (see link below), the current implementation returns integers at nanosecond precision instead of the `datetime` type produced by `ms` and `us`, which application developers will need to handle on their own. We recommend using pandas' to_datetime().
The Python Connector may modify the interface in the future if Python officially supports nanoseconds in full. 1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds 2. https://www.python.org/dev/peps/pep-0564/ @@ -328,7 +328,7 @@ Due to the current imperfection of Python's nanosecond support (see link below), ## Frequently Asked Questions -Welcome to [ask questions or report questions] (https://github.com/taosdata/taos-connector-python/issues). +Welcome to [ask questions or report problems](https://github.com/taosdata/taos-connector-python/issues). ## Important Update diff --git a/docs-en/14-reference/03-connector/rust.mdx b/docs-en/14-reference/03-connector/rust.mdx index 2c8fe68c1ca8b091b8d685d8e20942a02ab2c5e8..cd54f35982ec13fc3c9160145fa002fb6f1d094b 100644 --- a/docs-en/14-reference/03-connector/rust.mdx +++ b/docs-en/14-reference/03-connector/rust.mdx @@ -30,7 +30,7 @@ REST connections are supported on all platforms that can run Rust. Please refer to [version support list](/reference/connector#version-support). -The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. Recommend to use TDengine version 2.4 or higher to avoid known issues. +The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 2.4 or higher to avoid known issues. ## Installation @@ -206,7 +206,7 @@ let conn: Taos = cfg.connect(); ### Connection pooling -In complex applications, recommand to enable connection pool. Connection pool for [libtaos] is implemented using [r2d2]. +In complex applications, we recommend enabling connection pools. The connection pool for [libtaos] is implemented using [r2d2]. A connection pool with default parameters can be generated as follows. @@ -269,7 +269,7 @@ The [Taos] structure is the connection manager in [libtaos] and provides two mai Note that Rust asynchronous functions and an asynchronous runtime are required. -[Taos] provides partial Rust methodization of SQL to reduce the frequency of `format!` code blocks. +[Taos] provides a few Rust methods that encapsulate SQL to reduce the frequency of `format!` code blocks. - `.describe(table: &str)`: Executes `DESCRIBE` and returns a Rust data structure. - `.create_database(database: &str)`: Executes the `CREATE DATABASE` statement. @@ -279,7 +279,7 @@ In addition, this structure is also the entry point for [Parameter Binding](#Par ### Bind Interface -Similar to the C interface, Rust provides the bind interface's wraping. First, create a bind object [Stmt] for a SQL command from the [Taos] object. +Similar to the C interface, Rust provides a wrapping of the bind interface. First, create a bind object [Stmt] for a SQL command from the [Taos] object. ```rust let mut stmt: Stmt = taos.stmt("insert into ? values(? ,?)") ?
; diff --git a/docs-en/14-reference/03-connector/tdengine-jdbc-connector.png b/docs-en/14-reference/03-connector/tdengine-jdbc-connector.png deleted file mode 100644 index 7541aaf98ad73cbddac44c34bd775b32ab3a735e..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/03-connector/tdengine-jdbc-connector.png and /dev/null differ diff --git a/docs-en/14-reference/03-connector/tdengine-jdbc-connector.webp b/docs-en/14-reference/03-connector/tdengine-jdbc-connector.webp new file mode 100644 index 0000000000000000000000000000000000000000..37cf6d90a528e320d5cb7d6da502d3a5b10aa4ee Binary files /dev/null and b/docs-en/14-reference/03-connector/tdengine-jdbc-connector.webp differ diff --git a/docs-en/14-reference/04-taosadapter.md b/docs-en/14-reference/04-taosadapter.md index 85fd2923b02189d6f3cfd73efff784d12c3bb69a..3264124655e7040e1d94b43500a0b582d95cb5a1 100644 --- a/docs-en/14-reference/04-taosadapter.md +++ b/docs-en/14-reference/04-taosadapter.md @@ -24,21 +24,21 @@ taosAdapter provides the following features. ## taosAdapter architecture diagram -![taosAdapter Architecture](taosAdapter-architecture.png) +![TDengine Database taosAdapter Architecture](taosAdapter-architecture.webp) ## taosAdapter Deployment Method ### Install taosAdapter -taosAdapter has been part of TDengine server software since TDengine v2.4.0.0. If you use the TDengine server, you don't need additional steps to install taosAdapter. You can download taosAdapter from [TAOSData official website](https://taosdata.com/en/all-downloads/) to download the TDengine server installation package (taosAdapter is included in v2.4.0.0 and later version). If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/develop/BUILD.md) documentation. +taosAdapter has been part of TDengine server software since TDengine v2.4.0.0. If you use the TDengine server, you don't need additional steps to install taosAdapter. You can download the TDengine server installation package from the [TDengine official website](https://tdengine.com/all-downloads/) (taosAdapter is included in v2.4.0.0 and later versions). If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter](https://github.com/taosdata/taosadapter/blob/develop/BUILD.md) documentation. -### start/stop taosAdapter +### Start/Stop taosAdapter On Linux systems, the taosAdapter service is managed by `systemd` by default. You can use the command `systemctl start taosadapter` to start the taosAdapter service and use the command `systemctl stop taosadapter` to stop the taosAdapter service. ### Remove taosAdapter -Use the command `rmtaos` to remove the TDengine server software if you use tar.gz package or use package management command like rpm or apt to remove the TDengine server, including taosAdapter. +Use the command `rmtaos` to remove the TDengine server software if you used the tar.gz package. If you installed using a .deb or .rpm package, use the corresponding command for your package manager, like apt or rpm, to remove the TDengine server, including taosAdapter.
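For reference, the `systemd` commands described above look like this in practice; the `status` invocation is an assumption (the standard systemd way to inspect a service), not taken from the original text.

```bash
# Start and stop the taosAdapter service via systemd, as described above
systemctl start taosadapter
systemctl stop taosadapter

# Assumption: inspect the service state the standard systemd way
systemctl status taosadapter
```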
### Upgrade taosAdapter @@ -153,8 +153,7 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl ## Feature List -- Compatible with RESTful interfaces - [https://www.taosdata.com/cn/documentation/connector#restful](https://www.taosdata.com/cn/documentation/connector#restful) +- Compatible with RESTful interfaces [REST API](/reference/rest-api/) - Compatible with InfluxDB v1 write interface [https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/) - Compatible with OpenTSDB JSON and telnet format writes @@ -187,7 +186,7 @@ You can use any client that supports the http protocol to write data to or query ### InfluxDB -You can use any client that supports the http protocol to access the Restful interface address `http://:6041/` to write data in InfluxDB compatible format to TDengine. The EndPoint is as follows: +You can use any client that supports the http protocol to access the RESTful interface address `http://:6041/` to write data in InfluxDB compatible format to TDengine. The EndPoint is as follows: ```text /influxdb/v1/write @@ -204,7 +203,7 @@ Note: InfluxDB token authorization is not supported at present. Only Basic autho ### OpenTSDB -You can use any client that supports the http protocol to access the Restful interface address `http://:6041/` to write data in OpenTSDB compatible format to TDengine. +You can use any client that supports the http protocol to access the RESTful interface address `http://:6041/` to write data in OpenTSDB compatible format to TDengine. ```text /opentsdb/v1/put/json/:db @@ -241,7 +240,7 @@ node_export is an exporter of hardware and OS metrics exposed by the \*NIX kerne ## Memory usage optimization methods -taosAdapter will monitor its memory usage during operation and adjust it with two thresholds. Valid values range from -1 to 100 integers in percent of the system's physical memory. +taosAdapter will monitor its memory usage during operation and adjust it with two thresholds. Valid values are integers between 1 and 100, and represent a percentage of the system's physical memory. - pauseQueryMemoryThreshold - pauseAllMemoryThreshold @@ -277,7 +276,7 @@ Corresponding configuration parameter monitor.pauseQueryMemoryThreshold memory threshold for no more queries Environment variable `TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD` (default 70) ``` -You can adjust it according to the specific application scenario and operation strategy, and it is recommended to use operation monitoring software to monitor system memory status timely. The load balancer can also check the taosAdapter running status through this interface. +You should adjust this parameter based on your specific application scenario and operation strategy. We recommend using monitoring software to monitor system memory status. The load balancer can also check the taosAdapter running status through this interface. ## taosAdapter Monitoring Metrics @@ -326,7 +325,7 @@ You can also adjust the level of the taosAdapter log output by setting the `--lo ## How to migrate from older TDengine versions to taosAdapter -In TDengine server 2.2.x.x or earlier, the TDengine server process (taosd) contains an embedded HTTP service. As mentioned earlier, taosAdapter is a standalone software managed using `systemd` and has its process ID. And there are some configuration parameters and behaviors that are different between the two. See the following table for details.
+In TDengine server 2.2.x.x or earlier, the TDengine server process (taosd) contains an embedded HTTP service. As mentioned earlier, taosAdapter is a standalone software managed using `systemd` and has its own process ID. There are some configuration parameters and behaviors that are different between the two. See the following table for details. | **#** | **embedded httpd** | **taosAdapter** | **comment** | | ----- | ------------------- | ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------ | diff --git a/docs-en/14-reference/06-taosdump.md b/docs-en/14-reference/06-taosdump.md index 973999704b595ea9b742f1ef759f973aa1f05649..a7e216398a183a096678d8d70c429606d4e5f809 100644 --- a/docs-en/14-reference/06-taosdump.md +++ b/docs-en/14-reference/06-taosdump.md @@ -12,14 +12,13 @@ taosdump can back up a database, a super table, or a normal table as a logical d Suppose the specified location already has data files. In that case, taosdump will prompt the user and exit immediately to avoid data overwriting, which means that the same path can only be used for one backup. Please be careful if you see a prompt for this. -taosdump is a logical backup tool and should not be used to back up any raw data, environment settings, Users should not use taosdump to back up raw data, environment settings, hardware information, server configuration, or cluster topology. taosdump uses [Apache AVRO](https://avro.apache.org/) as the data file format to store backup data. ## Installation There are two ways to install taosdump: -- Install the taosTools official installer. Please find taosTools from [All download links](https://www.taosdata.com/all-downloads) page and download and install it. +- Install the taosTools official installer. Please find taosTools on the [All download links](https://www.tdengine.com/all-downloads) page, then download and install it. - Compile taos-tools separately and install it. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details. @@ -28,14 +27,14 @@ There are two ways to install taosdump: ### taosdump backup data 1. backing up all databases: specify `-A` or `-all-databases` parameter. -2. backup multiple specified databases: use `-D db1,db2,... ` parameters; 3. +2. backup multiple specified databases: use `-D db1,db2,... ` parameters; 3. back up some super or normal tables in the specified database: use `-dbname stbname1 stbname2 tbname1 tbname2 ... ` parameters. Note that the first parameter of this input sequence is the database name, and only one database is supported. The second and subsequent parameters are the names of super or normal tables in that database, separated by spaces. 4. back up the system log database: TDengine clusters usually contain a system database named `log`. The data in this database is generated by TDengine's own operation, and taosdump will not back up the log database by default. If users need to back up the log database, users can use the `-a` or `-allow-sys` command-line parameter. 5. Loose mode backup: taosdump version 1.4.1 onwards provides the `-n` and `-L` parameters for backing up data without using escape characters, i.e. in "loose" mode. This can reduce the backup time and backup data footprint if table names, column names, and tag names do not use the `escape character`.
If you are unsure about using `-n` and `-L` conditions, please use the default parameters for "strict" mode backup. See the [official documentation](/taos-sql/escape) for a description of escaped characters. :::tip - taosdump versions after 1.4.1 provide the `-I` argument for parsing Avro file schema and data. If users specify `-s`, taosdump will only parse the schema. -- Backups after taosdump 1.4.2 use the batch count specified by the `-B` parameter. The default value is 16384. If, in some environments, low network speed or disk performance causes "Error actual dump ... batch ..." can be tried by challenging the `-B` parameter to a smaller value. +- Backups after taosdump 1.4.2 use the batch count specified by the `-B` parameter. The default value is 16384. If, in some environments, low network speed or disk performance causes "Error actual dump ... batch ...", then try changing the `-B` parameter to a smaller value. ::: @@ -44,7 +43,7 @@ There are two ways to install taosdump: Restore the data file in the specified path: use the `-i` parameter plus the path to the data file. You should not use the same directory to backup different data sets, and you should not backup the same data set multiple times in the same path. Otherwise, backups may be overwritten or duplicated. :::tip -taosdump internally uses TDengine stmt binding API for writing recovery data and currently uses 16384 as one write batch for better data recovery performance. If there are more columns in the backup data, it may cause a "WAL size exceeds limit" error. You can try to adjust to a smaller value by using the `-B` parameter. +taosdump internally uses TDengine stmt binding API for writing recovery data with a default batch size of 16384 for better data recovery performance. If there are more columns in the backup data, it may cause a "WAL size exceeds limit" error. You can try to adjust the batch size to a smaller value by using the `-B` parameter.
::: diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.png deleted file mode 100644 index 4708f836feb21980f2db7fed4a55f799b23a6ec1..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp new file mode 100644 index 0000000000000000000000000000000000000000..a78e18028a94c2f6a783b08d992a25c791527407 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.png deleted file mode 100644 index f2684e6eed70e8f56697eae42b495d6bd62815e8..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp new file mode 100644 index 0000000000000000000000000000000000000000..b152418d0902b8ebdf62ebce6705c10dd5ab4fbf Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.png deleted file mode 100644 index 74686691e4106b8646c3deee1e0ce73b2f53f1ea..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp new file mode 100644 index 0000000000000000000000000000000000000000..f58f48b7f17375cb8e62e7c0126ca3aea56a13f6 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-4-requests.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-4-requests.png deleted file mode 100644 index 27964215567f9f961c0aeaf1b863188437008fb7..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-4-requests.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp new file mode 100644 index 0000000000000000000000000000000000000000..00afcce013602dce0da17bfd033f65aaa8e43bb7 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-5-database.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-5-database.png deleted file mode 100644 index b0d3abbf21ec4d4bd7bfb95fcc03a5f936b22665..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-5-database.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-5-database.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-5-database.webp new file mode 100644 index 0000000000000000000000000000000000000000..567e5694f9d7a035a3eb354493d3df8ed64db251 Binary files 
/dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-5-database.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.png deleted file mode 100644 index 2b54cbeb83bcff12f20461a4f57f882e2073f231..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp new file mode 100644 index 0000000000000000000000000000000000000000..cc8a912810f35e53a6e5fa96ea0c81e334ffc0df Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.png deleted file mode 100644 index eb3848657f13900c856ac595c20766465157e9c4..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp new file mode 100644 index 0000000000000000000000000000000000000000..651b716bc511ba2ed5db5e6fc6b0591ef150cbf6 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.png deleted file mode 100644 index d94b2e02ac9855bb3d2f77d8902e068839db364f..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp new file mode 100644 index 0000000000000000000000000000000000000000..8666193f59497180574fd2786266e5baabbe9761 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-full.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-full.png deleted file mode 100644 index 654df2934597ce600a1dc2dcd0cab7e29de7076d..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-full.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-full.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-full.webp new file mode 100644 index 0000000000000000000000000000000000000000..7f38a76a2b899ffebc7aecd39c8ec4fd0b2da778 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-full.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-manager-status.png b/docs-en/14-reference/07-tdinsight/assets/alert-manager-status.png deleted file mode 100644 index e3afa22c0326d70567ec4529c83101c746daac87..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/alert-manager-status.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-manager-status.webp b/docs-en/14-reference/07-tdinsight/assets/alert-manager-status.webp new file mode 100644 
index 0000000000000000000000000000000000000000..3d7fe932a23f3720e76e4217a7b5d1868d81fac8 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/alert-manager-status.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-notification-channel.png b/docs-en/14-reference/07-tdinsight/assets/alert-notification-channel.png deleted file mode 100644 index 198bf37141c86a66cdd91b47a331bcdeb83daaf8..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/alert-notification-channel.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-notification-channel.webp b/docs-en/14-reference/07-tdinsight/assets/alert-notification-channel.webp new file mode 100644 index 0000000000000000000000000000000000000000..517123954efe4b94485fdab2e07be0d765f5daa2 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/alert-notification-channel.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-query-demo.png b/docs-en/14-reference/07-tdinsight/assets/alert-query-demo.png deleted file mode 100644 index ace3aa3c2f8f14fabdac54bc25ae2d9449445b69..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/alert-query-demo.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-query-demo.webp b/docs-en/14-reference/07-tdinsight/assets/alert-query-demo.webp new file mode 100644 index 0000000000000000000000000000000000000000..6666296ac16e7a0c0ab3db23f0517f2089d09035 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/alert-query-demo.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.png b/docs-en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.png deleted file mode 100644 index 7082e49f6beb8690c36f98a3f4ff2befdb8fd014..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp b/docs-en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp new file mode 100644 index 0000000000000000000000000000000000000000..6f74bc3a47a32de661ef25f787a947d823715810 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-rule-test.png b/docs-en/14-reference/07-tdinsight/assets/alert-rule-test.png deleted file mode 100644 index ffd4911b53854c42dbf0ff11838cb604fa694138..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/alert-rule-test.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-rule-test.webp b/docs-en/14-reference/07-tdinsight/assets/alert-rule-test.webp new file mode 100644 index 0000000000000000000000000000000000000000..acda3b24a6263815ac8b658709d2172300ca3b00 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/alert-rule-test.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-button.png b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-button.png deleted file mode 100644 index 802c7366f921301bd7fbc62458e56b2d1eaf195c..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-button.png and /dev/null differ diff --git 
a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp new file mode 100644 index 0000000000000000000000000000000000000000..903e236e2a776dfef7f85c014662e8913a9033a5 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.png b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.png deleted file mode 100644 index 019ec921b6f808671f4f864ddf3380159d4a0dcc..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp new file mode 100644 index 0000000000000000000000000000000000000000..14fcfe9d183e8804199708ae4492d0904a7c9d62 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-test.png b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-test.png deleted file mode 100644 index 3963abb4ea8ae0e6f5557466f7a5b746c2d2ea3c..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-test.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp new file mode 100644 index 0000000000000000000000000000000000000000..00b50cc619b030d1fb2be3a367183901d5c833e8 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource.png b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource.png deleted file mode 100644 index 837100464b35a5cafac474723aef603f91945ebc..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource.webp b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource.webp new file mode 100644 index 0000000000000000000000000000000000000000..06d0ff6ed50091a6340508bc5b2b3f78b65dcb18 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-display.png b/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-display.png deleted file mode 100644 index 98223df25499effac343ff5723544a3c289f18fa..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-display.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-display.webp b/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-display.webp new file mode 100644 index 0000000000000000000000000000000000000000..e2ec052b91e439a817f6e88b8afd0fcb4dcb7ef8 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-display.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.png b/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.png deleted file 
mode 100644 index 07aba348f02b4fb8ef68e79664920c119b842d4c..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp b/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp new file mode 100644 index 0000000000000000000000000000000000000000..665c035f9755b9472aee33cd61d3ab52831194b5 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-import-dashboard.png b/docs-en/14-reference/07-tdinsight/assets/howto-import-dashboard.png deleted file mode 100644 index 7e28939ead8bf3b6e2b4330e4f9b59c2e39b5c1c..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/howto-import-dashboard.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-import-dashboard.webp b/docs-en/14-reference/07-tdinsight/assets/howto-import-dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..7dc42eeba919fee7b438a453c00bb9fd0ac2d274 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/howto-import-dashboard.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/import-dashboard-15167.png b/docs-en/14-reference/07-tdinsight/assets/import-dashboard-15167.png deleted file mode 100644 index 981f640b14d18aa6f0682768d8405a232df500f6..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/import-dashboard-15167.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/import-dashboard-15167.webp b/docs-en/14-reference/07-tdinsight/assets/import-dashboard-15167.webp new file mode 100644 index 0000000000000000000000000000000000000000..7ef081900f8de99c859193b69d49b3d6bc187909 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/import-dashboard-15167.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.png b/docs-en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.png deleted file mode 100644 index 94ef4fa5fe63e535118a81707b413c028ce01f70..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp b/docs-en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp new file mode 100644 index 0000000000000000000000000000000000000000..602452fc4c89424d8e17d46d74949b69be84dbe8 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.png b/docs-en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.png deleted file mode 100644 index 670cacc377c2801fa9437c3c132c5c7fbc361b0f..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp b/docs-en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp new file mode 100644 index 0000000000000000000000000000000000000000..35a3ebba781f24dbb0066993d1ca2f02659997d2 Binary files /dev/null and 
b/docs-en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/import_dashboard.png b/docs-en/14-reference/07-tdinsight/assets/import_dashboard.png deleted file mode 100644 index d74cd36c96ee0fd24ddc6feae2da07824816f745..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/import_dashboard.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/import_dashboard.webp b/docs-en/14-reference/07-tdinsight/assets/import_dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..fb7958f1b9fbd43c8f63136024842790e711c490 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/import_dashboard.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/tdengine_dashboard.png b/docs-en/14-reference/07-tdinsight/assets/tdengine_dashboard.png deleted file mode 100644 index 0101e7430cb2ef673818de8bd3af53d0d082ad3f..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/tdengine_dashboard.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/tdengine_dashboard.webp b/docs-en/14-reference/07-tdinsight/assets/tdengine_dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..49f1d88f4ad93286cd8582536e82b4dcc4ff271b Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/tdengine_dashboard.webp differ diff --git a/docs-en/14-reference/07-tdinsight/index.md b/docs-en/14-reference/07-tdinsight/index.md index 4850cecb334ff24cc9fcf3b9a6e394827730111c..16bae615c04ab92e4934418d6c0a3aaf1e1ccde8 100644 --- a/docs-en/14-reference/07-tdinsight/index.md +++ b/docs-en/14-reference/07-tdinsight/index.md @@ -61,7 +61,7 @@ sudo yum install \ ## Automated deployment of TDinsight -We provide an installation script [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh) script to allow users to configure the installation automatically and quickly. +We provide an installation script [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh) to allow users to configure the installation automatically and quickly. You can download the script via `wget` or other tools: @@ -233,33 +233,33 @@ The default username/password is `admin`. Grafana will require a password change Point to the **Configurations** -> **Data Sources** menu, and click the **Add data source** button. -![Add data source button](./assets/howto-add-datasource-button.png) +![TDengine Database TDinsight Add data source button](./assets/howto-add-datasource-button.webp) Search for and select **TDengine**. -![Add datasource](./assets/howto-add-datasource-tdengine.png) +![TDengine Database TDinsight Add datasource](./assets/howto-add-datasource-tdengine.webp) Configure the TDengine datasource. -![Datasource Configuration](./assets/howto-add-datasource.png) +![TDengine Database TDinsight Datasource Configuration](./assets/howto-add-datasource.webp) Save and test. It will report 'TDengine Data source is working' under normal circumstances. -![datasource test](./assets/howto-add-datasource-test.png) +![TDengine Database TDinsight datasource test](./assets/howto-add-datasource-test.webp) ### Importing dashboards Point to **+** / **Create** - **import** (or `/dashboard/import` url). 
-![Import Dashboard and Configuration](./assets/import_dashboard.png) +![TDengine Database TDinsight Import Dashboard and Configuration](./assets/import_dashboard.webp) Type the dashboard ID `15167` in the **Import via grafana.com** location and **Load**. -![Import via grafana.com](./assets/import-dashboard-15167.png) +![TDengine Database TDinsight Import via grafana.com](./assets/import-dashboard-15167.webp) Once the import is complete, the full page view of TDinsight is shown below. -![show](./assets/TDinsight-full.png) +![TDengine Database TDinsight show](./assets/TDinsight-full.webp) ## TDinsight dashboard details @@ -269,7 +269,7 @@ Details of the metrics are as follows. ### Cluster Status -![tdinsight-mnodes-overview](./assets/TDinsight-1-cluster-status.png) +![TDengine Database TDinsight mnodes overview](./assets/TDinsight-1-cluster-status.webp) This section contains the current information and status of the cluster, the alert information is also here (from left to right, top to bottom). @@ -289,7 +289,7 @@ This section contains the current information and status of the cluster, the ale ### DNodes Status -![tdinsight-mnodes-overview](./assets/TDinsight-2-dnodes.png) +![TDengine Database TDinsight mnodes overview](./assets/TDinsight-2-dnodes.webp) - **DNodes Status**: simple table view of `show dnodes`. - **DNodes Lifetime**: the time elapsed since the dnode was created. @@ -298,14 +298,14 @@ This section contains the current information and status of the cluster, the ale ### MNode Overview -![tdinsight-mnodes-overview](./assets/TDinsight-3-mnodes.png) +![TDengine Database TDinsight mnodes overview](./assets/TDinsight-3-mnodes.webp) -1. **MNodes Status**: a simple table view of `show mnodes`. 2. +1. **MNodes Status**: a simple table view of `show mnodes`. 2. **MNodes Number**: similar to `DNodes Number`, the number of MNodes changes. ### Request -![tdinsight-requests](./assets/TDinsight-4-requests.png) +![TDengine Database TDinsight tdinsight requests](./assets/TDinsight-4-requests.webp) 1. **Requests Rate(Inserts per Second)**: average number of inserts per second. 2. **Requests (Selects)**: number of query requests and change rate (count of second). @@ -313,46 +313,46 @@ This section contains the current information and status of the cluster, the ale ### Database -![tdinsight-database](./assets/TDinsight-5-database.png) +![TDengine Database TDinsight database](./assets/TDinsight-5-database.webp) Database usage, repeated for each value of the variable `$database` i.e. multiple rows per database. -1. **STables**: number of super tables. 2. -2. **Total Tables**: number of all tables. 3. -3. **Sub Tables**: the number of all super table sub-tables. 4. +1. **STables**: number of super tables. +2. **Total Tables**: number of all tables. +3. **Sub Tables**: the number of all super table subtables. 4. **Tables**: graph of all normal table numbers over time. 5. **Tables Number Foreach VGroups**: The number of tables contained in each VGroups. ### DNode Resource Usage -![dnode-usage](./assets/TDinsight-6-dnode-usage.png) +![TDengine Database TDinsight dnode usage](./assets/TDinsight-6-dnode-usage.webp) Data node resource usage display with repeated multiple rows for the variable `$fqdn` i.e., each data node. Includes. 1. **Uptime**: the time elapsed since the dnode was created. -2. **Has MNodes?**: whether the current dnode is a mnode. 3. -3. **CPU Cores**: the number of CPU cores. 4. -4. **VNodes Number**: the number of VNodes in the current dnode. 5. -5. 
**VNodes Masters**: the number of vnodes in the master role. 6. +2. **Has MNodes?**: whether the current dnode is a mnode. +3. **CPU Cores**: the number of CPU cores. +4. **VNodes Number**: the number of VNodes in the current dnode. +5. **VNodes Masters**: the number of vnodes in the master role. 6. **Current CPU Usage of taosd**: CPU usage rate of taosd processes. 7. **Current Memory Usage of taosd**: memory usage of taosd processes. 8. **Disk Used**: The total disk usage percentage of the taosd data directory. -9. **CPU Usage**: Process and system CPU usage. 10. +9. **CPU Usage**: Process and system CPU usage. 10. **RAM Usage**: Time series view of RAM usage metrics. 11. **Disk Used**: Disks used at each level of multi-level storage (default is level0). 12. **Disk Increasing Rate per Minute**: Percentage increase or decrease in disk usage per minute. -13. **Disk IO**: Disk IO rate. 14. +13. **Disk IO**: Disk IO rate. 14. **Net IO**: Network IO, the aggregate network IO rate in addition to the local network. ### Login History -![Login History](./assets/TDinsight-7-login-history.png) +![TDengine Database TDinsight Login History](./assets/TDinsight-7-login-history.webp) Currently, only the number of logins per minute is reported. ### Monitoring taosAdapter -![taosadapter](./assets/TDinsight-8-taosadapter.png) +![TDengine Database TDinsight monitor taosadapter](./assets/TDinsight-8-taosadapter.webp) Support monitoring taosAdapter request statistics and status details. Includes. @@ -376,7 +376,7 @@ TDinsight installed via the `TDinsight.sh` script can be cleaned up using the co To completely uninstall TDinsight during a manual installation, you need to clean up the following. 1. the TDinsight Dashboard in Grafana. -2. the Data Source in Grafana. 3. +2. the Data Source in Grafana. 3. remove the `tdengine-datasource` plugin from the plugin installation directory. ## Integrated Docker Example diff --git a/docs-en/14-reference/08-taos-shell.md b/docs-en/14-reference/08-taos-shell.md index fe5e5f2bc29509a4b96646253732076c7a6ee7ea..002b515093258152e85dd9d7437e424dfa98c874 100644 --- a/docs-en/14-reference/08-taos-shell.md +++ b/docs-en/14-reference/08-taos-shell.md @@ -1,14 +1,14 @@ --- -title: TDengine Command Line (CLI) -sidebar_label: TDengine CLI +title: TDengine Command Line Interface (CLI) +sidebar_label: Command Line Interface description: Instructions and tips for using the TDengine CLI --- -The TDengine command-line application (hereafter referred to as `TDengine CLI`) is the most simplest way for users to manipulate and interact with TDengine instances. +The TDengine command-line interface (hereafter referred to as `TDengine CLI`) is the simplest way for users to manipulate and interact with TDengine instances. ## Installation -If executed on the TDengine server-side, there is no need for additional installation steps to install TDengine CLI as it is already included and installed automatically. To run TDengine CLI on the environment which no TDengine server running, the TDengine client installation package needs to be installed first. For details, please refer to [connector](/reference/connector/). +If executed on the TDengine server-side, there is no need for additional installation steps to install TDengine CLI as it is already included and installed automatically. To run TDengine CLI in an environment where no TDengine server is running, the TDengine client installation package needs to be installed first. For details, please refer to [connector](/reference/connector/). 
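As a concrete companion to the installation note above, the sketch below shows how the TDengine CLI might be invoked from a client-only machine; the host name is a placeholder and 6030 is TDengine's default server port.

```bash
# Connect to a remote TDengine instance from a machine with only the client installed.
# "tdengine.example.com" is a placeholder host; adjust -P if serverPort was changed.
taos -h tdengine.example.com -P 6030 -u root -p
```

The `-p` flag with no value makes the CLI prompt for the password instead of exposing it on the command line.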
## Execution diff --git a/docs-en/14-reference/11-docker/index.md b/docs-en/14-reference/11-docker/index.md index 4ca84be369e14b3223e8609e06c9ebc4e35eaa2d..f532a263d88def21bd8b0fe9c59adaf982ee2404 100644 --- a/docs-en/14-reference/11-docker/index.md +++ b/docs-en/14-reference/11-docker/index.md @@ -315,13 +315,13 @@ password: taosdata taoslog-td2: ``` - :::note +:::note - The `VERSION` environment variable is used to set the tdengine image tag - `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time - `TAOS_REPLICA` is used to set the default number of database replicas. Its value range is [1,3] - We recommend setting with `TAOS_ARBITRATOR` to use arbitrator in a two-nodes environment. - ::: - + We recommend setting it with `TAOS_ARBITRATOR` to use arbitrator in a two-node environment. + + ::: 2. Start the cluster diff --git a/docs-en/14-reference/12-config/index.md b/docs-en/14-reference/12-config/index.md index c4e7cc523c400ea5be6610b64f1561246b1bfa24..10e23bbdb85c1aa65ffa021d3d7a7fdaf7b77b09 100644 --- a/docs-en/14-reference/12-config/index.md +++ b/docs-en/14-reference/12-config/index.md @@ -65,7 +65,7 @@ taos --dump-config | ------------- | ------------------------------------------------------------------------ | | Applicable | Server Only | | Meaning | The FQDN of the host where `taosd` will be started. It can be IP address | -| Default Value | The first hostname configured for the hos | +| Default Value | The first hostname configured for the host | | Note | It should be within 96 bytes | ### serverPort @@ -78,7 +78,7 @@ taos --dump-config | Note | REST service is provided by `taosd` before 2.4.0.0 but by `taosAdapter` after 2.4.0.0, the default port of REST service is 6041 | :::note -TDengine uses continuous 13 ports, both TCP and TCP, from the port specified by `serverPort`. These ports need to be kept as open if firewall is enabled. Below table describes the ports used by TDengine in details. +TDengine uses 13 consecutive ports, both TCP and UDP, starting from the port specified by `serverPort`. These ports need to be kept open if a firewall is enabled. The table below describes the ports used by TDengine in detail. ::: @@ -182,8 +182,8 @@ TDengine uses continuous 13 ports, both TCP and TCP, from the port specified by | ------------- | -------------------------------------------- | | Applicable | Server Only | | Meaning | The maximum number of distinct rows returned | -| Value Range | [100,000 - 100, 000, 000] | -| Default Value | 100, 000 | +| Value Range | [100,000 - 100,000,000] | +| Default Value | 100,000 | | Note | After version 2.3.0.0 | ## Locale Parameters @@ -202,7 +202,7 @@ To handle the data insertion and data query from multiple timezones, Unix Timest On Linux system, TDengine clients automatically obtain timezone from the host. Alternatively, the timezone can be configured explicitly in configuration file `taos.cfg` like below. ``` -timezone UTC-8 +timezone UTC-7 timezone GMT-8 timezone Asia/Shanghai ``` @@ -240,7 +240,7 @@ To avoid the problems of using time strings, Unix timestamp can be used directly | Default Value | Locale configured in host | :::info -A specific type "nchar" is provided in TDengine to store non-ASCII characters such as Chinese, Japanese, Korean. The characters to be stored in nchar type are firstly encoded in UCS4-LE before sending to server side.
To store non-ASCII characters correctly, the encoding format of the client side needs to be set properly. +A specific type "nchar" is provided in TDengine to store non-ASCII characters such as Chinese, Japanese, and Korean. The characters to be stored in nchar type are first encoded in UCS4-LE before being sent to the server side. To store non-ASCII characters correctly, the encoding format of the client side needs to be set properly. The characters input on the client side are encoded using the default system encoding, which is UTF-8 on Linux, or GB18030 or GBK on some systems in Chinese, POSIX in docker, CP936 on Windows in Chinese. The encoding of the operating system in use must be set correctly so that the characters in nchar type can be converted to UCS4-LE. @@ -779,7 +779,7 @@ To prevent system resource from being exhausted by multiple concurrent streams, :::note HTTP server had been provided by `taosd` prior to version 2.4.0.0, now is provided by `taosAdapter` after version 2.4.0.0. -The parameters described in this section are only application in versions prior to 2.4.0.0. If you are using any version from 2.4.0.0, please refer to [taosAdapter]](/reference/taosadapter/). +The parameters described in this section are only applicable in versions prior to 2.4.0.0. If you are using any version from 2.4.0.0, please refer to [taosAdapter](/reference/taosadapter/). ::: diff --git a/docs-en/14-reference/12-directory.md b/docs-en/14-reference/12-directory.md index dbdba2b715bb41baf9b70dce91a3065e585d0434..304e3bcb434ee9a6ba338577a4d1ba546b548e3f 100644 --- a/docs-en/14-reference/12-directory.md +++ b/docs-en/14-reference/12-directory.md @@ -32,7 +32,7 @@ All executable files of TDengine are in the _/usr/local/taos/bin_ directory by d - _taosd-dump-cfg.gdb_: script to facilitate debugging of taosd's gdb execution. :::note -taosdump after version 2.4.0.0 require taosTools as a standalone installation. A few version taosBenchmark is include in taosTools too. +taosdump after version 2.4.0.0 requires taosTools as a standalone installation. A new version of taosBenchmark is included in taosTools too. ::: :::tip diff --git a/docs-en/14-reference/13-schemaless/13-schemaless.md b/docs-en/14-reference/13-schemaless/13-schemaless.md index d9ce9b434dd14a89d243b2ed629f3fde64e6aba0..2393cbe346dc5c50f19d36e8a6e3f49015a9e259 100644 --- a/docs-en/14-reference/13-schemaless/13-schemaless.md +++ b/docs-en/14-reference/13-schemaless/13-schemaless.md @@ -3,17 +3,17 @@ title: Schemaless Writing description: "The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data as it is written to the interface." --- -In IoT applications, many data items are often collected for intelligent control, business analysis, device monitoring, etc. Due to the version upgrade of the application logic, or the hardware adjustment of the device itself, the data collection items may change more frequently. To facilitate the data logging work in such cases, TDengine starting from version 2.2.0.0, it provides a series of interfaces to the schemaless writing method, which eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data as the data is written to the interface. And when necessary, Schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly.
+In IoT applications, many data items are often collected for intelligent control, business analysis, device monitoring, etc. Due to the version upgrades of the application logic, or the hardware adjustment of the devices themselves, the data collection items may change frequently. To facilitate the data logging work in such cases, TDengine starting from version 2.2.0.0 provides a series of interfaces to the schemaless writing method, which eliminate the need to create super tables and subtables in advance by automatically creating the storage structure corresponding to the data as the data is written to the interface. And when necessary, schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly. -The schemaless writing method creates super tables and their corresponding sub-tables completely indistinguishable from the super tables and sub-tables created directly via SQL. You can write data directly to them via SQL statements. Note that the names of tables created by schemaless writing are based on fixed mapping rules for tag values, so they are not explicitly ideographic and lack readability. +The schemaless writing method creates super tables and their corresponding subtables that are completely indistinguishable from the super tables and subtables created directly via SQL. You can write data directly to them via SQL statements. Note that the names of tables created by schemaless writing are based on fixed mapping rules for tag values, so they are not explicitly ideographic and lack readability. ## Schemaless Writing Line Protocol -TDengine's schemaless writing line protocol supports to be compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. However, when using these three protocols, you need to specify in the API the standard of the parsing protocol to be used for the input content. +TDengine's schemaless writing line protocol supports InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. However, when using these three protocols, you need to specify in the API the standard of the parsing protocol to be used for the input content. For the standard writing protocols of InfluxDB and OpenTSDB, please refer to the documentation of each protocol. The following is a description of TDengine's extended protocol, based on InfluxDB's line protocol first. They allow users to control the (super table) schema more granularly. -With the following formatting conventions, Schemaless writing uses a single string to express a data row (multiple rows can be passed into the writing API at once to enable bulk writing). +With the following formatting conventions, schemaless writing uses a single string to express a data row (multiple rows can be passed into the writing API at once to enable bulk writing). ```json measurement,tag_set field_set timestamp @@ -23,7 +23,7 @@ where : - measurement will be used as the data table name. It will be separated from tag_set by a comma. - tag_set will be used as tag data in the format `=,=`, i.e. multiple tags' data can be separated by a comma. It is separated from field_set by space. -- field_set will be used as normal column data in the format of `=,=`, again using a comma to separate multiple normal columns of data. It is separated from the timestamp by space.
It is separated from the timestamp by a space. - The timestamp is the primary key corresponding to the data in this row. All data in tag_set is automatically converted to the NCHAR data type and does not require double quotes ("). @@ -32,7 +32,7 @@ In the schemaless writing data line protocol, each data item in the field_set ne - If there are English double quotes on both sides, it indicates the BINARY(32) type. For example, `"abc"`. - If there are double quotes on both sides and an L prefix, it means NCHAR(32) type. For example, `L"error message"`. -- Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\) in front. (All refer to the ASCII character) +- Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\\) in front. (All refer to the ASCII character) - Numeric types will be distinguished from data types by the suffix. | **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** | @@ -58,26 +58,26 @@ Note that if the wrong case is used when describing the data type suffix, or if Schemaless writes process row data according to the following principles. -1. You can use the following rules to generate the sub-table names: first, combine the measurement name and the key and value of the label into the next string: +1. You can use the following rules to generate the subtable names: first, combine the measurement name and the key and value of the label into the following string: ```json "measurement,tag_key1=tag_value1,tag_key2=tag_value2" ``` Note that tag_key1, tag_key2 are not the original order of the tags entered by the user but the result of using the tag names in ascending order of the strings. Therefore, tag_key1 is not the first tag entered in the line protocol. -The string's MD5 hash value "md5_val" is calculated after the sorting is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t*" is a fixed prefix that every table generated by this mapping relationship has. 2. +The string's MD5 hash value "md5_val" is calculated after the sorting is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t*" is a fixed prefix that every table generated by this mapping relationship has. 2. If the super table obtained by parsing the line protocol does not exist, this super table is created. -If the sub-table obtained by the parse line protocol does not exist, Schemaless creates the sub-table according to the sub-table name determined in steps 1 or 2. 4. +If the subtable obtained by parsing the line protocol does not exist, schemaless writing creates the subtable according to the subtable name determined in steps 1 or 2. 4. If the specified tag or regular column in the data row does not exist, the corresponding tag or regular column is added to the super table (only incremental). 5. If there are some tag columns or regular columns in the super table that are not specified to take values in a data row, then the values of these columns are set to NULL. 6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data. -7.
If the specified data sub-table already exists, and the specified tag column takes a value different from the saved value this time, the value in the latest data row overwrites the old tag column take value. +7. If the specified data subtable already exists, and the specified tag column takes a value different from the saved value this time, the value in the latest data row overwrites the old tag column value. 8. Errors encountered throughout the processing will interrupt the writing process and return an error code. :::tip All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed -16k bytes. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area. +48k bytes. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area. ::: ## Time resolution recognition diff --git a/docs-en/14-reference/_collectd.mdx b/docs-en/14-reference/_collectd.mdx index 1f57d883eec9feadc3cc460bf968b0dd43fedfe8..ce88328098a181de48dcaa080ef45f228b20bf1c 100644 --- a/docs-en/14-reference/_collectd.mdx +++ b/docs-en/14-reference/_collectd.mdx @@ -25,7 +25,7 @@ The default database name written by taosAdapter is `collectd`. You can also mod #collectd collectd uses a plugin mechanism to write the collected monitoring data to different data storage software in various forms. tdengine supports both direct collection plugins and write_tsdb plugins. -#### is configured to receive data from the direct collection plugin +#### Configure the direct collection plugin Modify the relevant configuration items in the collectd configuration file (default location /etc/collectd/collectd.conf). @@ -62,7 +62,7 @@ LoadPlugin write_tsdb ``` -Where fills in the server's domain name or IP address running taosAdapter. Fill in the data that taosAdapter uses to receive the collectd write_tsdb plugin (default is 6047). +Where is the domain name or IP address of the server running taosAdapter. Fill in the port that taosAdapter uses to receive collectd write_tsdb plugin data (default is 6047). ```text LoadPlugin write_tsdb diff --git a/docs-en/14-reference/index.md b/docs-en/14-reference/index.md index 89f675902d01ba2d2c1b322408c372429d6bda1c..f350eebfc1a1ca2feaedc18c4b4fa798742e31b4 100644 --- a/docs-en/14-reference/index.md +++ b/docs-en/14-reference/index.md @@ -2,11 +2,11 @@ title: Reference --- -The reference guide is the detailed introduction to TDengine, various TDengine's connectors in different languages, and the tools that come with it. +The reference guide is a detailed introduction to TDengine, including the various TDengine connectors in different languages and the tools that come with TDengine.
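Looking back at the schemaless subtable-naming rules above, a minimal shell sketch of the mapping (sorted tag string, MD5 hash, fixed prefix) might look like the following; the key string and the `t_md5_val` prefix form come from the text, while the concrete values are illustrative.

```bash
# Sketch of the subtable-name rule: sorted "measurement,tag=..." string -> MD5 -> "t_" prefix.
key="measurement,tag_key1=tag_value1,tag_key2=tag_value2"   # tags already in ascending key order
md5_val=$(echo -n "$key" | md5sum | cut -d' ' -f1)
echo "t_${md5_val}"
```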
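Similarly, the collectd `write_tsdb` hunk earlier in this patch can be made concrete with a minimal configuration sketch; the host below is a placeholder, and 6047 is the default taosAdapter port quoted in the text.

```bash
# Append a minimal write_tsdb section to collectd's configuration and restart the daemon.
# "taosadapter.example.com" is a placeholder host.
sudo tee -a /etc/collectd/collectd.conf > /dev/null <<'EOF'
LoadPlugin write_tsdb
<Plugin write_tsdb>
  <Node>
    Host "taosadapter.example.com"
    Port "6047"
  </Node>
</Plugin>
EOF
sudo systemctl restart collectd
```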
```mdx-code-block import DocCardList from '@theme/DocCardList'; import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; -``` \ No newline at end of file +``` diff --git a/docs-en/14-reference/taosAdapter-architecture.png b/docs-en/14-reference/taosAdapter-architecture.png deleted file mode 100644 index 08a9018553aae6f86b42d127b372d0cecfa9bdf8..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/taosAdapter-architecture.png and /dev/null differ diff --git a/docs-en/14-reference/taosAdapter-architecture.webp b/docs-en/14-reference/taosAdapter-architecture.webp new file mode 100644 index 0000000000000000000000000000000000000000..a4162b0a037c06d34191784716c51080b9f8a570 Binary files /dev/null and b/docs-en/14-reference/taosAdapter-architecture.webp differ diff --git a/docs-en/20-third-party/01-grafana.mdx b/docs-en/20-third-party/01-grafana.mdx index c1bfd4a96a4576df8570d8b480d5c2afe47e20b8..dc2033ae6f789908d4d9f9ecd96c9396748c4400 100644 --- a/docs-en/20-third-party/01-grafana.mdx +++ b/docs-en/20-third-party/01-grafana.mdx @@ -23,7 +23,7 @@ You can download The Grafana plugin for TDengine from Data Sources` on the left side, as shown in the following figure. -![img](./grafana/add_datasource1.jpg) +![TDengine Database TDinsight plugin add datasource 1](./grafana/add_datasource1.webp) Click `Add data source` to enter the Add data source page, and enter TDengine in the query box to add it, as shown in the following figure. -![img](./grafana/add_datasource2.jpg) +![TDengine Database TDinsight plugin add datasource 2](./grafana/add_datasource2.webp) Enter the datasource configuration page, and follow the default prompts to modify the corresponding configuration. -![img](./grafana/add_datasource3.jpg) +![TDengine Database TDinsight plugin add database 3](./grafana/add_datasource3.webp) - Host: IP address of the server where the components of the TDengine cluster provide REST service (offered by taosd before 2.4 and by taosAdapter since 2.4) and the port number of the TDengine REST service (6041), by default use `http://localhost:6041`. - User: TDengine user name. @@ -78,23 +78,23 @@ Enter the datasource configuration page, and follow the default prompts to modif Click `Save & Test` to test. Follows are a success. -![img](./grafana/add_datasource4.jpg) +![TDengine Database TDinsight plugin add database 4](./grafana/add_datasource4.webp) ### Create Dashboard Go back to the main interface to create the Dashboard, click Add Query to enter the panel query page: -![img](./grafana/create_dashboard1.jpg) +![TDengine Database TDinsight plugin create dashboard 1](./grafana/create_dashboard1.webp) As shown above, select the `TDengine` data source in the `Query` and enter the corresponding SQL in the query box below for query. -- INPUT SQL: enter the statement to be queried (the result set of the SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where, from, to and interval are built-in variables of the TDengine plugin, indicating the range and time interval of queries fetched from the Grafana plugin panel. In addition to the built-in variables, ` custom template variables are also supported. 
+- INPUT SQL: enter the statement to be queried (the result set of the SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where, from, to and interval are built-in variables of the TDengine plugin, indicating the range and time interval of queries fetched from the Grafana plugin panel. In addition to the built-in variables, custom template variables are also supported. - ALIAS BY: This allows you to set the current query alias. - GENERATE SQL: Clicking this button will automatically replace the corresponding variables and generate the final executed statement. Follow the default prompt to query the average system memory usage for the specified interval on the server where the current TDengine deployment is located as follows. -![img](./grafana/create_dashboard2.jpg) +![TDengine Database TDinsight plugin create dashboard 2](./grafana/create_dashboard2.webp) > For more information on how to use Grafana to create the appropriate monitoring interface and for more details on using Grafana, refer to the official Grafana [documentation](https://grafana.com/docs/). diff --git a/docs-en/20-third-party/05-collectd.md b/docs-en/20-third-party/05-collectd.md index 609e55842ab35cdc2d394663f5450f908e49f7f7..db62f2ecd1afb4936466ca0243a7e14ff294f8b6 100644 --- a/docs-en/20-third-party/05-collectd.md +++ b/docs-en/20-third-party/05-collectd.md @@ -6,7 +6,7 @@ title: collectd writing import CollectD from "../14-reference/_collectd.mdx" -collectd is a daemon used to collect system performance metric data. collectd provides various storage mechanisms to store different values. It periodically counts system performance statistics number while the system is running and storing information. You can use this information to help identify current system performance bottlenecks and predict future system load. +collectd is a daemon used to collect system performance metric data. collectd provides various storage mechanisms to store different values. It periodically collects system performance statistics while the system is running and stores the information. You can use this information to help identify current system performance bottlenecks and predict future system load. You can write the data collected by collectd to TDengine by simply modifying the configuration of collectd to the domain name (or IP address) and corresponding port of the server running taosAdapter. It can take full advantage of TDengine's efficient storage query performance and clustering capability for time-series data. diff --git a/docs-en/20-third-party/09-emq-broker.md b/docs-en/20-third-party/09-emq-broker.md index 13562ba7f720499c23771437c5c6ba0f61819456..738372cabd736c0be47b4080cc2c984e5110236c 100644 --- a/docs-en/20-third-party/09-emq-broker.md +++ b/docs-en/20-third-party/09-emq-broker.md @@ -3,7 +3,7 @@ sidebar_label: EMQX Broker title: EMQX Broker writing --- -MQTT is a popular IoT data transfer protocol, [EMQX](https://github.com/emqx/emqx) is an open-source MQTT Broker software, without any code, only need to use "rules" in EMQX Dashboard to do simple configuration. You can write MQTT data directly to TDengine. EMQX supports saving data to TDengine by sending it to web services and provides a native TDengine driver for direct saving in the Enterprise Edition. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use it. tdengine).
+MQTT is a popular IoT data transfer protocol, and [EMQX](https://github.com/emqx/emqx) is an open-source MQTT broker. You can write MQTT data directly to TDengine without any code; you only need to use "rules" in the EMQX Dashboard to create a simple configuration. EMQX supports saving data to TDengine by sending it to web services and provides a native TDengine driver for direct saving in the Enterprise Edition. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use it. ## Prerequisites @@ -44,25 +44,25 @@ Since the configuration interface of EMQX differs from version to version, here Use your browser to open the URL `http://IP:18083` and log in to EMQX Dashboard. The initial installation username is `admin` and the password is: `public`. -![img](./emqx/login-dashboard.png) +![TDengine Database EMQX login dashboard](./emqx/login-dashboard.webp) ### Creating Rule Select "Rule" in the "Rule Engine" on the left and click the "Create" button: -![img](./emqx/rule-engine.png) +![TDengine Database EMQX rule engine](./emqx/rule-engine.webp) ### Edit SQL fields -![img](./emqx/create-rule.png) +![TDengine Database EMQX create rule](./emqx/create-rule.webp) ### Add "action handler" -![img](./emqx/add-action-handler.png) +![TDengine Database EMQX add action handler](./emqx/add-action-handler.webp) ### Add "Resource" -![img](./emqx/create-resource.png) +![TDengine Database EMQX create resource](./emqx/create-resource.webp) Select "Data to Web Service" and click the "New Resource" button. @@ -70,13 +70,13 @@ Select "Data to Web Service" and click the "New Resource" button. Select "Data to Web Service" and fill in the request URL as the address and port of the server running taosAdapter (default is 6041). Leave the other properties at their default values. -![img](./emqx/edit-resource.png) +![TDengine Database EMQX edit resource](./emqx/edit-resource.webp) ### Edit "action" Edit the resource configuration to add the key/value pairing for Authorization. Please refer to the [ TDengine REST API documentation ](https://docs.taosdata.com/reference/rest-api/) for details on the authorization. Enter the rule engine replacement template in the message body. -![img](./emqx/edit-action.png) +![TDengine Database EMQX edit action](./emqx/edit-action.webp) ## Compose program to mock data @@ -163,7 +163,7 @@ Edit the resource configuration to add the key/value pairing for Authorization. Note: `CLIENT_NUM` in the code can be set to a smaller value at the beginning of the test to avoid the hardware not being able to handle a more significant number of concurrent clients.
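The Authorization key/value mentioned in the "Edit action" step above is standard HTTP Basic authentication against TDengine's REST API; a quick sketch for deriving the header value from TDengine's default credentials (replace them with your own in production) is:

```bash
# Base64-encode "user:password" for the HTTP Basic Authorization header.
echo -n "root:taosdata" | base64
# Output: cm9vdDp0YW9zZGF0YQ==  ->  add the pair: Authorization / Basic cm9vdDp0YW9zZGF0YQ==
```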
-![img](./emqx/client-num.png) +![TDengine Database EMQX client num](./emqx/client-num.webp) ## Execute tests to simulate sending MQTT data @@ -172,19 +172,19 @@ npm install mqtt mockjs --save ---registry=https://registry.npm.taobao.org node mock.js ``` -![img](./emqx/run-mock.png) +![TDengine Database EMQX run mock](./emqx/run-mock.webp) ## Verify that EMQX is receiving data Refresh the EMQX Dashboard rules engine interface to see how many records were received correctly: -![img](./emqx/check-rule-matched.png) +![TDengine Database EMQX rule matched](./emqx/check-rule-matched.webp) ## Verify that data writing to TDengine Use the TDengine CLI program to log in and query the appropriate databases and tables to verify that the data is being written to TDengine correctly: -![img](./emqx/check-result-in-taos.png) +![TDengine Database EMQX result in taos](./emqx/check-result-in-taos.webp) Please refer to the [TDengine official documentation](https://docs.taosdata.com/) for more details on how to use TDengine. EMQX Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use EMQX. diff --git a/docs-en/20-third-party/11-kafka.md b/docs-en/20-third-party/11-kafka.md index b9c7a3814a75a066b498438b6e632690697ae7ca..9c78a6645a0578d3b8d494d1fa60831eb88b3c81 100644 --- a/docs-en/20-third-party/11-kafka.md +++ b/docs-en/20-third-party/11-kafka.md @@ -9,11 +9,11 @@ TDengine Kafka Connector contains two plugins: TDengine Source Connector and TDe Kafka Connect is a component of Apache Kafka that enables other systems, such as databases, cloud services, file systems, etc., to connect to Kafka easily. Data can flow from other software to Kafka via Kafka Connect and Kafka to other systems via Kafka Connect. Plugins that read data from other software are called Source Connectors, and plugins that write data to other software are called Sink Connectors. Neither Source Connector nor Sink Connector will directly connect to Kafka Broker, and Source Connector transfers data to Kafka Connect. Sink Connector receives data from Kafka Connect. -![](kafka/Kafka_Connect.png) +![TDengine Database Kafka Connector -- Kafka Connect](kafka/Kafka_Connect.webp) TDengine Source Connector is used to read data from TDengine in real-time and send it to Kafka Connect. Users can use The TDengine Sink Connector to receive data from Kafka Connect and write it to TDengine. -![](kafka/streaming-integration-with-kafka-connect.png) +![TDengine Database Kafka Connector -- streaming integration with kafka connect](kafka/streaming-integration-with-kafka-connect.webp) ## What is Confluent? @@ -26,7 +26,7 @@ Confluent adds many extensions to Kafka. include: 5. GUI for managing and monitoring Kafka - Confluent Control Center Some of these extensions are available in the community version of Confluent. Some are only available in the enterprise version. -![](kafka/confluentPlatform.png) +![TDengine Database Kafka Connector -- Confluent platform](kafka/confluentPlatform.webp) Confluent Enterprise Edition provides the `confluent` command-line tool to manage various components. 
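As a hedged illustration of the `confluent` command-line tool just mentioned, a local all-in-one test deployment can typically be started with a single command; the exact subcommand varies across CLI versions.

```bash
# Start ZooKeeper, Kafka, Schema Registry, Connect, etc. for a local test.
# Newer Confluent CLIs use "confluent local services start"; some older ones used "confluent start".
confluent local services start
```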
@@ -194,10 +194,10 @@ If the above command is executed successfully, the output is as follows: Prepare text file as test data, its content is following: ```txt title="test-data.txt" -meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000 -meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250000000 -meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249000000 -meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250000000 +meters,location=California.LoSangeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000 +meters,location=California.LoSangeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250000000 +meters,location=California.LoSangeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249000000 +meters,location=California.LoSangeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250000000 ``` Use kafka-console-producer to write test data to the topic `meters`. @@ -221,14 +221,14 @@ Database changed. taos> select * from meters; ts | current | voltage | phase | groupid | location | =============================================================================================================================================================== - 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | Beijing.Haidian | - 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | Beijing.Haidian | - 2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | Beijing.Haidian | - 2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | Beijing.Haidian | + 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LoSangeles | + 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LoSangeles | + 2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | California.LoSangeles | + 2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | California.LoSangeles | Query OK, 4 row(s) in set (0.004208s) ``` -If you see the above data, the synchronization is successful. If not, check the logs of Kafka Connect. For detailed description of configuration parameters, see [Configuration Reference](#Configuration Reference). +If you see the above data, the synchronization is successful. If not, check the logs of Kafka Connect. For detailed description of configuration parameters, see [Configuration Reference](#configuration-reference). 
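The `kafka-console-producer` step referenced above can be sketched as follows; `localhost:9092` assumes a default single-broker deployment.

```bash
# Pipe the prepared line-protocol records into the `meters` topic.
# Newer Kafka releases prefer --bootstrap-server over --broker-list.
kafka-console-producer --broker-list localhost:9092 --topic meters < test-data.txt
```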
## The use of TDengine Source Connector @@ -273,7 +273,7 @@ DROP DATABASE IF EXISTS test; CREATE DATABASE test; USE test; CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT); -INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000); +INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(California.LoSangeles, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(California.LoSangeles, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(California.LoSangeles, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(California.LoSangeles, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000); ``` Use TDengine CLI to execute SQL script @@ -300,8 +300,8 @@ output: ```` ...... -meters,location="beijing.chaoyang",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000 -meters,location="beijing.chaoyang",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000 +meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000 +meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000 ...... 
```` diff --git a/docs-en/20-third-party/emqx/add-action-handler.png b/docs-en/20-third-party/emqx/add-action-handler.png deleted file mode 100644 index 97a1f933ecfadfcab399938806d73c5a5ecc6427..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/add-action-handler.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/add-action-handler.webp b/docs-en/20-third-party/emqx/add-action-handler.webp new file mode 100644 index 0000000000000000000000000000000000000000..4a8d105f711991226cfbd43b6e9ab07d7ccc686a Binary files /dev/null and b/docs-en/20-third-party/emqx/add-action-handler.webp differ diff --git a/docs-en/20-third-party/emqx/check-result-in-taos.png b/docs-en/20-third-party/emqx/check-result-in-taos.png deleted file mode 100644 index c17a5c1ea2b9bbd49263056c8bf09c9aabab07d5..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/check-result-in-taos.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/check-result-in-taos.webp b/docs-en/20-third-party/emqx/check-result-in-taos.webp new file mode 100644 index 0000000000000000000000000000000000000000..8fa040a86104fece02ddaf8986f0a67de316143d Binary files /dev/null and b/docs-en/20-third-party/emqx/check-result-in-taos.webp differ diff --git a/docs-en/20-third-party/emqx/check-rule-matched.png b/docs-en/20-third-party/emqx/check-rule-matched.png deleted file mode 100644 index 9e9a466946a1afa857e2bbc07b14956dd0f984b6..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/check-rule-matched.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/check-rule-matched.webp b/docs-en/20-third-party/emqx/check-rule-matched.webp new file mode 100644 index 0000000000000000000000000000000000000000..e5a614035739df859b27c817f3b9f41be444b513 Binary files /dev/null and b/docs-en/20-third-party/emqx/check-rule-matched.webp differ diff --git a/docs-en/20-third-party/emqx/client-num.png b/docs-en/20-third-party/emqx/client-num.png deleted file mode 100644 index fff48cbf3b271c367079ddde425b3f9b014062f7..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/client-num.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/client-num.webp b/docs-en/20-third-party/emqx/client-num.webp new file mode 100644 index 0000000000000000000000000000000000000000..a151b184843607d67b649babb3145bfb3e329cda Binary files /dev/null and b/docs-en/20-third-party/emqx/client-num.webp differ diff --git a/docs-en/20-third-party/emqx/create-resource.png b/docs-en/20-third-party/emqx/create-resource.png deleted file mode 100644 index 58da4c391a3692b9f5fa348d952701eab8bcb746..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/create-resource.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/create-resource.webp b/docs-en/20-third-party/emqx/create-resource.webp new file mode 100644 index 0000000000000000000000000000000000000000..bf9cccbe49c57f925c5e6b094a4c0d88a64242cb Binary files /dev/null and b/docs-en/20-third-party/emqx/create-resource.webp differ diff --git a/docs-en/20-third-party/emqx/create-rule.png b/docs-en/20-third-party/emqx/create-rule.png deleted file mode 100644 index 73b0b6ee3e6065a142df98abe8c0dbb32b34f89d..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/create-rule.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/create-rule.webp b/docs-en/20-third-party/emqx/create-rule.webp new file mode 100644 index 
0000000000000000000000000000000000000000..13e8fc83d48d2fd9d0a303c707ef3024d3ee5203 Binary files /dev/null and b/docs-en/20-third-party/emqx/create-rule.webp differ diff --git a/docs-en/20-third-party/emqx/edit-action.png b/docs-en/20-third-party/emqx/edit-action.png deleted file mode 100644 index 2a43ee369a439cf11cee23c11f25d6a84b26d7dc..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/edit-action.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/edit-action.webp b/docs-en/20-third-party/emqx/edit-action.webp new file mode 100644 index 0000000000000000000000000000000000000000..7f6d2e36a82b1917930e5d3969115db9359674a0 Binary files /dev/null and b/docs-en/20-third-party/emqx/edit-action.webp differ diff --git a/docs-en/20-third-party/emqx/edit-resource.png b/docs-en/20-third-party/emqx/edit-resource.png deleted file mode 100644 index 0a0b3560044f4ed6e0a8f040b74085a7e8948b1f..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/edit-resource.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/edit-resource.webp b/docs-en/20-third-party/emqx/edit-resource.webp new file mode 100644 index 0000000000000000000000000000000000000000..fd5d278fab16bba4e04e1c348d4086dce77abb98 Binary files /dev/null and b/docs-en/20-third-party/emqx/edit-resource.webp differ diff --git a/docs-en/20-third-party/emqx/login-dashboard.png b/docs-en/20-third-party/emqx/login-dashboard.png deleted file mode 100644 index d6c5035c98d860faf639ef6611c6719adf80c47b..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/login-dashboard.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/login-dashboard.webp b/docs-en/20-third-party/emqx/login-dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..f84cee668fb6efe1586515ba0dee3ae2f10a5b30 Binary files /dev/null and b/docs-en/20-third-party/emqx/login-dashboard.webp differ diff --git a/docs-en/20-third-party/emqx/rule-engine.png b/docs-en/20-third-party/emqx/rule-engine.png deleted file mode 100644 index db110a837b024c82ee9d22f02dcd3a9d06abdd55..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/rule-engine.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/rule-engine.webp b/docs-en/20-third-party/emqx/rule-engine.webp new file mode 100644 index 0000000000000000000000000000000000000000..c1711c8cc757cd73fef5cb941a1818756241f7f0 Binary files /dev/null and b/docs-en/20-third-party/emqx/rule-engine.webp differ diff --git a/docs-en/20-third-party/emqx/rule-header-key-value.png b/docs-en/20-third-party/emqx/rule-header-key-value.png deleted file mode 100644 index b81b9a9684aa2f98d00b7ec21e5de411fb450312..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/rule-header-key-value.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/rule-header-key-value.webp b/docs-en/20-third-party/emqx/rule-header-key-value.webp new file mode 100644 index 0000000000000000000000000000000000000000..e645b3822dffec86f4926e78a57eaffa1e7f4d8d Binary files /dev/null and b/docs-en/20-third-party/emqx/rule-header-key-value.webp differ diff --git a/docs-en/20-third-party/emqx/run-mock.png b/docs-en/20-third-party/emqx/run-mock.png deleted file mode 100644 index 0da25818575247732d5d3a783aa52cf7ce24662c..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/run-mock.png and /dev/null differ diff --git 
a/docs-en/20-third-party/emqx/run-mock.webp b/docs-en/20-third-party/emqx/run-mock.webp new file mode 100644 index 0000000000000000000000000000000000000000..ed33f1666d456f1ab40ed6830af4550d4c7ca037 Binary files /dev/null and b/docs-en/20-third-party/emqx/run-mock.webp differ diff --git a/docs-en/20-third-party/grafana/add_datasource1.jpg b/docs-en/20-third-party/grafana/add_datasource1.jpg deleted file mode 100644 index 1f0f5110f312c57f3ec1788bbc02f04fac6ac142..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/grafana/add_datasource1.jpg and /dev/null differ diff --git a/docs-en/20-third-party/grafana/add_datasource1.webp b/docs-en/20-third-party/grafana/add_datasource1.webp new file mode 100644 index 0000000000000000000000000000000000000000..211edc4457abd0db6b0ef64636d61d65b5f43db6 Binary files /dev/null and b/docs-en/20-third-party/grafana/add_datasource1.webp differ diff --git a/docs-en/20-third-party/grafana/add_datasource2.jpg b/docs-en/20-third-party/grafana/add_datasource2.jpg deleted file mode 100644 index fa7a83e00e96fae649910dff4edf5f5bdadd7850..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/grafana/add_datasource2.jpg and /dev/null differ diff --git a/docs-en/20-third-party/grafana/add_datasource2.webp b/docs-en/20-third-party/grafana/add_datasource2.webp new file mode 100644 index 0000000000000000000000000000000000000000..8ab547231fee4d3b0874fcfe08c0ce152b0c53a1 Binary files /dev/null and b/docs-en/20-third-party/grafana/add_datasource2.webp differ diff --git a/docs-en/20-third-party/grafana/add_datasource3.jpg b/docs-en/20-third-party/grafana/add_datasource3.jpg deleted file mode 100644 index fc850ad08ff1174de972906842e0d5ee64e6e5cb..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/grafana/add_datasource3.jpg and /dev/null differ diff --git a/docs-en/20-third-party/grafana/add_datasource3.webp b/docs-en/20-third-party/grafana/add_datasource3.webp new file mode 100644 index 0000000000000000000000000000000000000000..d8a733360a09b4425c571f254a9ecb298c04b72f Binary files /dev/null and b/docs-en/20-third-party/grafana/add_datasource3.webp differ diff --git a/docs-en/20-third-party/grafana/add_datasource4.jpg b/docs-en/20-third-party/grafana/add_datasource4.jpg deleted file mode 100644 index 3ba73e50d455111f8621f4165746078554c2d790..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/grafana/add_datasource4.jpg and /dev/null differ diff --git a/docs-en/20-third-party/grafana/add_datasource4.webp b/docs-en/20-third-party/grafana/add_datasource4.webp new file mode 100644 index 0000000000000000000000000000000000000000..b1e0fc6e2b27df4af1bb5ad92756bcb5d4fda63e Binary files /dev/null and b/docs-en/20-third-party/grafana/add_datasource4.webp differ diff --git a/docs-en/20-third-party/grafana/create_dashboard1.jpg b/docs-en/20-third-party/grafana/create_dashboard1.jpg deleted file mode 100644 index 3b83c3a1714e9e7540e0b06239ef7c1c4f63fe2c..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/grafana/create_dashboard1.jpg and /dev/null differ diff --git a/docs-en/20-third-party/grafana/create_dashboard1.webp b/docs-en/20-third-party/grafana/create_dashboard1.webp new file mode 100644 index 0000000000000000000000000000000000000000..55eb388833e4df2a46f4d1cf6d346aa11429385d Binary files /dev/null and b/docs-en/20-third-party/grafana/create_dashboard1.webp differ diff --git a/docs-en/20-third-party/grafana/create_dashboard2.jpg 
b/docs-en/20-third-party/grafana/create_dashboard2.jpg deleted file mode 100644 index fe5d768ac55254251e0290bf257178f5ff28f5a5..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/grafana/create_dashboard2.jpg and /dev/null differ diff --git a/docs-en/20-third-party/grafana/create_dashboard2.webp b/docs-en/20-third-party/grafana/create_dashboard2.webp new file mode 100644 index 0000000000000000000000000000000000000000..bb40e407187718c52e9f617d8ebd3d25fd14b56b Binary files /dev/null and b/docs-en/20-third-party/grafana/create_dashboard2.webp differ diff --git a/docs-en/20-third-party/kafka/Kafka_Connect.png b/docs-en/20-third-party/kafka/Kafka_Connect.png deleted file mode 100644 index f3dc02ea2a743c6e1ae5531e14f820e3adeca29a..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/kafka/Kafka_Connect.png and /dev/null differ diff --git a/docs-en/20-third-party/kafka/Kafka_Connect.webp b/docs-en/20-third-party/kafka/Kafka_Connect.webp new file mode 100644 index 0000000000000000000000000000000000000000..8f2000a749b0a2ccec9939abd144c53c44fbe171 Binary files /dev/null and b/docs-en/20-third-party/kafka/Kafka_Connect.webp differ diff --git a/docs-en/20-third-party/kafka/confluentPlatform.png b/docs-en/20-third-party/kafka/confluentPlatform.png deleted file mode 100644 index f8e69f2c7f64d809996b2d1bf1370b67b8030850..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/kafka/confluentPlatform.png and /dev/null differ diff --git a/docs-en/20-third-party/kafka/confluentPlatform.webp b/docs-en/20-third-party/kafka/confluentPlatform.webp new file mode 100644 index 0000000000000000000000000000000000000000..ff03d4e51aaaec85f07ff41ecda0fb9bd6cb2847 Binary files /dev/null and b/docs-en/20-third-party/kafka/confluentPlatform.webp differ diff --git a/docs-en/20-third-party/kafka/streaming-integration-with-kafka-connect.png b/docs-en/20-third-party/kafka/streaming-integration-with-kafka-connect.png deleted file mode 100644 index 26d8a866d706180c900d69bb6f57ca2dff0047dd..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/kafka/streaming-integration-with-kafka-connect.png and /dev/null differ diff --git a/docs-en/20-third-party/kafka/streaming-integration-with-kafka-connect.webp b/docs-en/20-third-party/kafka/streaming-integration-with-kafka-connect.webp new file mode 100644 index 0000000000000000000000000000000000000000..120d534ec132cea2ccef6cf87a3ce680a5ac6e9c Binary files /dev/null and b/docs-en/20-third-party/kafka/streaming-integration-with-kafka-connect.webp differ diff --git a/docs-en/21-tdinternal/01-arch.md b/docs-en/21-tdinternal/01-arch.md index 9607c9b38709f6a320f82a8ee250afb407492627..16d4b7afe26107e251a542ee24b644c1d372def0 100644 --- a/docs-en/21-tdinternal/01-arch.md +++ b/docs-en/21-tdinternal/01-arch.md @@ -11,7 +11,7 @@ The design of TDengine is based on the assumption that any hardware or software The logical structure diagram of the TDengine distributed architecture is as follows: -![TDengine architecture diagram](structure.png) +![TDengine Database architecture diagram](structure.webp)
Figure 1: TDengine architecture diagram
A complete TDengine system runs on one or more physical nodes. Logically, it includes data node (dnode), TDengine client driver (TAOSC) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through TAOSC's API. The following is a brief introduction to each logical unit. @@ -54,7 +54,7 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc To explain the relationship between vnode, mnode, TAOSC and application and their respective roles, the following is an analysis of a typical data writing process. -![typical process of TDengine](message.png) +![typical process of TDengine Database](message.webp)
Figure 2: Typical process of TDengine
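Before the numbered steps, it is worth seeing what triggers the whole flow: a single insert call from the application, with everything after step 1 handled by TAOSC and the cluster. Below is a minimal, illustrative Go sketch (not part of this change set) that assumes the Go connector, the default `root:taosdata` credentials, and the `power.meters` schema used by the docs-examples in this patch:

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/taosdata/driver-go/v2/taosSql"
)

func main() {
	// Native-connection DSN, in the form used by the Go examples below.
	taos, err := sql.Open("taosSql", "root:taosdata@tcp(localhost:6030)/")
	if err != nil {
		fmt.Println("failed to connect TDengine, err:", err)
		return
	}
	defer taos.Close()
	// Step 1 of the write process: the application hands an INSERT to the
	// client driver (TAOSC); forwarding, replication, and confirmation are
	// handled by the cluster as described in the steps below.
	_, err = taos.Exec("INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES (NOW, 10.2, 219, 0.32)")
	if err != nil {
		fmt.Println("failed to insert, err:", err)
	}
}
```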
1. Application initiates a request to insert data through JDBC, ODBC, or other APIs. @@ -123,7 +123,7 @@ If a database has N replicas, thus a virtual node group has N virtual nodes, but Master Vnode uses a writing process as follows: -![TDengine Master Writing Process](write_master.png) +![TDengine Database Master Writing Process](write_master.webp)
Figure 3: TDengine Master Writing Process
1. Master vnode receives the application data insertion request, verifies, and moves to the next step; @@ -137,7 +137,7 @@ Master Vnode uses a writing process as follows: For a slave vnode, the write process is as follows: -![TDengine Slave Writing Process](write_slave.png) +![TDengine Database Slave Writing Process](write_slave.webp)
Figure 4: TDengine Slave Writing Process
1. Slave vnode receives a data insertion request forwarded by Master vnode; @@ -267,7 +267,7 @@ For the data collected by device D1001, the number of records per hour is counte TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable. STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tags. A table can carry multiple tags, and tags can be added, deleted, or modified at any time. Applications can aggregate or run statistics over all or a subset of the tables under a STable by specifying tag filters, thus greatly simplifying the development of applications. The process is shown in the following figure: -![Diagram of multi-table aggregation query](multi_tables.png) +![TDengine Database Diagram of multi-table aggregation query](multi_tables.webp)
Figure 5: Diagram of multi-table aggregation query
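From the application side, this fan-out is invisible: the tag filter is just part of the SQL, and TDengine aggregates over every member table of the STable whose tags match. A minimal illustrative sketch (not part of this change set), reusing the `power.meters` STable and its `location`/`groupId` tags from the examples in this patch:

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/taosdata/driver-go/v2/taosSql"
)

func main() {
	taos, err := sql.Open("taosSql", "root:taosdata@tcp(localhost:6030)/")
	if err != nil {
		fmt.Println("failed to connect TDengine, err:", err)
		return
	}
	defer taos.Close()
	// One statement aggregates across all subtables whose tags match the
	// filter; the application never enumerates the member tables itself.
	rows, err := taos.Query("SELECT AVG(voltage) FROM power.meters WHERE groupId = 2 GROUP BY location")
	if err != nil {
		fmt.Println("failed to query, err:", err)
		return
	}
	defer rows.Close()
	for rows.Next() {
		var avgVoltage float64
		var location string
		// In 2.x the GROUP BY column is returned after the aggregate.
		if err := rows.Scan(&avgVoltage, &location); err != nil {
			fmt.Println("scan error:", err)
			return
		}
		fmt.Println(location, avgVoltage)
	}
}
```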
1. Application sends a query condition to system; diff --git a/docs-en/21-tdinternal/30-iot-big-data.md b/docs-en/21-tdinternal/30-iot-big-data.md deleted file mode 100644 index 4bdf5cfba98234c9d843634b5210ca3dae94d870..0000000000000000000000000000000000000000 --- a/docs-en/21-tdinternal/30-iot-big-data.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: IoT Big Data -description: "Characteristics of IoT Big Data, why general big data platform does not work well for IoT? The required features for an IoT Big Data Platform" ---- - -- [Characteristics of IoT Big Data](https://tdengine.com/2019/07/09/86.html) -- [Why don’t General Big Data Platforms Fit IoT Scenarios?](https://tdengine.com/2019/07/09/92.html) -- [Why TDengine is the Best Choice for IoT Big Data Processing?](https://tdengine.com/2019/07/09/94.html) -- [Why Redis, Kafka, Spark aren’t Needed if TDengine is Used in the IoT Platform?](https://tdengine.com/2019/07/09/96.html) - diff --git a/docs-en/21-tdinternal/dnode.png b/docs-en/21-tdinternal/dnode.png deleted file mode 100644 index cea87dcccba5d2761996e5dde998022d86487eb9..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/dnode.png and /dev/null differ diff --git a/docs-en/21-tdinternal/dnode.webp b/docs-en/21-tdinternal/dnode.webp new file mode 100644 index 0000000000000000000000000000000000000000..a56c7e4594df00a721cb48381d68ca3bc813cdc8 Binary files /dev/null and b/docs-en/21-tdinternal/dnode.webp differ diff --git a/docs-en/21-tdinternal/message.png b/docs-en/21-tdinternal/message.png deleted file mode 100644 index 715a8bd37ee9fe7e96eacce4e7ff563fedeefbee..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/message.png and /dev/null differ diff --git a/docs-en/21-tdinternal/message.webp b/docs-en/21-tdinternal/message.webp new file mode 100644 index 0000000000000000000000000000000000000000..a2a42abff3d6e932b41a3abe9feae4a5cc13c9e5 Binary files /dev/null and b/docs-en/21-tdinternal/message.webp differ diff --git a/docs-en/21-tdinternal/modules.png b/docs-en/21-tdinternal/modules.png deleted file mode 100644 index 10ae4703a6cbbf66afea325ce4c0f919f7769a07..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/modules.png and /dev/null differ diff --git a/docs-en/21-tdinternal/modules.webp b/docs-en/21-tdinternal/modules.webp new file mode 100644 index 0000000000000000000000000000000000000000..718a6abccdbe40d4a0df5e3812fe0ab943a7c523 Binary files /dev/null and b/docs-en/21-tdinternal/modules.webp differ diff --git a/docs-en/21-tdinternal/multi_tables.png b/docs-en/21-tdinternal/multi_tables.png deleted file mode 100644 index 0cefaab6a9a4cdd671c671f7c6186dea41415ff0..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/multi_tables.png and /dev/null differ diff --git a/docs-en/21-tdinternal/multi_tables.webp b/docs-en/21-tdinternal/multi_tables.webp new file mode 100644 index 0000000000000000000000000000000000000000..8f649e34a3a62d1b11b4403b2e743ff6b5e47be2 Binary files /dev/null and b/docs-en/21-tdinternal/multi_tables.webp differ diff --git a/docs-en/21-tdinternal/replica-forward.png b/docs-en/21-tdinternal/replica-forward.png deleted file mode 100644 index bf616e030b130603eceb5dccfd30b4a1dfa68ea5..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/replica-forward.png and /dev/null differ diff --git a/docs-en/21-tdinternal/replica-forward.webp b/docs-en/21-tdinternal/replica-forward.webp new file mode 100644 index 
0000000000000000000000000000000000000000..512efd4eba8f23ad0f8607eaaf5525f51ecdcf0e Binary files /dev/null and b/docs-en/21-tdinternal/replica-forward.webp differ diff --git a/docs-en/21-tdinternal/replica-master.png b/docs-en/21-tdinternal/replica-master.png deleted file mode 100644 index cb33f1ce98661563693215d8fc73b003235c7668..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/replica-master.png and /dev/null differ diff --git a/docs-en/21-tdinternal/replica-master.webp b/docs-en/21-tdinternal/replica-master.webp new file mode 100644 index 0000000000000000000000000000000000000000..57030a11f563af2689dbcfd206183f410b121aee Binary files /dev/null and b/docs-en/21-tdinternal/replica-master.webp differ diff --git a/docs-en/21-tdinternal/replica-restore.png b/docs-en/21-tdinternal/replica-restore.png deleted file mode 100644 index 1558e5ed0108d23efdc6b5d9ea0e44a1dff45d28..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/replica-restore.png and /dev/null differ diff --git a/docs-en/21-tdinternal/replica-restore.webp b/docs-en/21-tdinternal/replica-restore.webp new file mode 100644 index 0000000000000000000000000000000000000000..f282c2d4d23f517e3ef08e906cea7e9c5edc0b2a Binary files /dev/null and b/docs-en/21-tdinternal/replica-restore.webp differ diff --git a/docs-en/21-tdinternal/structure.png b/docs-en/21-tdinternal/structure.png deleted file mode 100644 index 4fc8f47ab0a30d95b85ba1d85105726ed981e56e..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/structure.png and /dev/null differ diff --git a/docs-en/21-tdinternal/structure.webp b/docs-en/21-tdinternal/structure.webp new file mode 100644 index 0000000000000000000000000000000000000000..b77a42c074b15302b5c3ab889fb550a46dd549b3 Binary files /dev/null and b/docs-en/21-tdinternal/structure.webp differ diff --git a/docs-en/21-tdinternal/vnode.png b/docs-en/21-tdinternal/vnode.png deleted file mode 100644 index e6148d4907cf9a18bc52251f712d5c685651b7f5..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/vnode.png and /dev/null differ diff --git a/docs-en/21-tdinternal/vnode.webp b/docs-en/21-tdinternal/vnode.webp new file mode 100644 index 0000000000000000000000000000000000000000..fae3104c89c542c26790b509d12ad56661082c32 Binary files /dev/null and b/docs-en/21-tdinternal/vnode.webp differ diff --git a/docs-en/21-tdinternal/write_master.png b/docs-en/21-tdinternal/write_master.png deleted file mode 100644 index ff2dfc20bfc2ecf956a2aab1a8965a7bbcae4387..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/write_master.png and /dev/null differ diff --git a/docs-en/21-tdinternal/write_master.webp b/docs-en/21-tdinternal/write_master.webp new file mode 100644 index 0000000000000000000000000000000000000000..9624036ed3d46ed60924ead9ce5c61acee0f4652 Binary files /dev/null and b/docs-en/21-tdinternal/write_master.webp differ diff --git a/docs-en/21-tdinternal/write_slave.png b/docs-en/21-tdinternal/write_slave.png deleted file mode 100644 index cacb2cb6bcc4f4d934e979862387e1345bbac078..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/write_slave.png and /dev/null differ diff --git a/docs-en/21-tdinternal/write_slave.webp b/docs-en/21-tdinternal/write_slave.webp new file mode 100644 index 0000000000000000000000000000000000000000..7c45dec11b00e6a738de458f9e1bedacfad75a96 Binary files /dev/null and b/docs-en/21-tdinternal/write_slave.webp differ diff --git a/docs-en/25-application/01-telegraf.md 
b/docs-en/25-application/01-telegraf.md index 718e04ecd3dbd2a72feba3f5297d9da33a94ba83..6a57145cd3d82ca5ec1ab828bfc7b6270bbe9d47 100644 --- a/docs-en/25-application/01-telegraf.md +++ b/docs-en/25-application/01-telegraf.md @@ -16,7 +16,7 @@ Current mainstream IT DevOps system usually include a data collection module, a This article introduces how to quickly build a TDengine + Telegraf + Grafana based IT DevOps visualization system without writing even a single line of code and by simply modifying a few lines of configuration files. The architecture is as follows. -![IT-DevOps-Solutions-Telegraf.png](/img/IT-DevOps-Solutions-Telegraf.png) +![TDengine Database IT-DevOps-Solutions-Telegraf](./IT-DevOps-Solutions-Telegraf.webp) ## Installation steps @@ -73,9 +73,9 @@ sudo systemctl start telegraf Log in to the Grafana interface using a web browser at `IP:3000`, with the system's initial username and password being `admin/admin`. Click on the gear icon on the left and select `Plugins`, you should find the TDengine data source plugin icon. -Click on the plus icon on the left and select `Import` to get the data from `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard- v0.1.0.json`, download the dashboard JSON file and import it. You will then see the dashboard in the following screen. +Click on the plus icon on the left and select `Import` to get the data from `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json`, download the dashboard JSON file and import it. You will then see the dashboard in the following screen. -![IT-DevOps-Solutions-telegraf-dashboard.png](/img/IT-DevOps-Solutions-telegraf-dashboard.png) +![TDengine Database IT-DevOps-Solutions-telegraf-dashboard](./IT-DevOps-Solutions-telegraf-dashboard.webp) ## Wrap-up diff --git a/docs-en/25-application/02-collectd.md b/docs-en/25-application/02-collectd.md index 2ac37618fafe11e71b215313e53f89b6c302f7cb..963881eafa6e5085eab951c1b1ab54faeba1fa7b 100644 --- a/docs-en/25-application/02-collectd.md +++ b/docs-en/25-application/02-collectd.md @@ -17,7 +17,7 @@ The new version of TDengine supports multiple data protocols and can accept data This article introduces how to quickly build an IT DevOps visualization system based on TDengine + collectd / StatsD + Grafana without writing even a single line of code but by simply modifying a few lines of configuration files. The architecture is shown in the following figure. -![IT-DevOps-Solutions-Collectd-StatsD.png](/img/IT-DevOps-Solutions-Collectd-StatsD.png) +![TDengine Database IT-DevOps-Solutions-Collectd-StatsD](./IT-DevOps-Solutions-Collectd-StatsD.webp) ## Installation Steps @@ -83,19 +83,19 @@ Click on the gear icon on the left and select `Plugins`, you should find the TDe Download the dashboard json from `https://github.com/taosdata/grafanaplugin/blob/master/examples/collectd/grafana/dashboards/collect-metrics-with-tdengine-v0.1.0.json`, click the plus icon on the left and select Import, follow the instructions to import the JSON file. After that, you can see the dashboard in the following screen.
-![IT-DevOps-Solutions-collectd-dashboard.png](/img/IT-DevOps-Solutions-collectd-dashboard.png) +![TDengine Database IT-DevOps-Solutions-collectd-dashboard](./IT-DevOps-Solutions-collectd-dashboard.webp) #### Importing the collectd dashboard Download the dashboard json file from `https://github.com/taosdata/grafanaplugin/blob/master/examples/collectd/grafana/dashboards/collect-metrics-with-tdengine-v0.1.0.json`, click the plus icon on the left side and select `Import`, and follow the interface prompts to select the JSON file to import. After that, you can see the dashboard in the following interface. -![IT-DevOps-Solutions-collectd-dashboard.png](/img/IT-DevOps-Solutions-collectd-dashboard.png) +![IT-DevOps-Solutions-collectd-dashboard](./IT-DevOps-Solutions-collectd-dashboard.webp) #### Importing the StatsD dashboard Download the dashboard json from `https://github.com/taosdata/grafanaplugin/blob/master/examples/statsd/dashboards/statsd-with-tdengine-v0.1.0.json`. Click on the plus icon on the left and select `Import`, and follow the interface prompts to import the JSON file. You will then see the dashboard in the following screen. -![IT-DevOps-Solutions-statsd-dashboard.png](/img/IT-DevOps-Solutions-statsd-dashboard.png) +![TDengine Database IT-DevOps-Solutions-statsd-dashboard](./IT-DevOps-Solutions-statsd-dashboard.webp) ## Wrap-up diff --git a/docs-en/25-application/03-immigrate.md b/docs-en/25-application/03-immigrate.md index 81d5f512bfc8ed3cd1f5223a9ff72218023515f0..69166bf78b66a23af35af726f2e5c477195a3595 100644 --- a/docs-en/25-application/03-immigrate.md +++ b/docs-en/25-application/03-immigrate.md @@ -32,7 +32,7 @@ We will explain how to migrate OpenTSDB applications to TDengine quickly, secure The following figure (Figure 1) shows the system's overall architecture for a typical DevOps application scenario. **Figure 1. Typical architecture in a DevOps scenario** -Figure 1. [IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch](/img/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.jpg "Figure 1. Typical architecture in a DevOps scenario") +![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp "Figure 1. Typical architecture in a DevOps scenario") In this application scenario, there are Agent tools deployed in the application environment to collect machine metrics, network metrics, and application metrics, data collectors to aggregate the information collected by the agents, systems for persistent data storage and management, and tools for monitoring data visualization (e.g., Grafana). @@ -75,7 +75,7 @@ After writing the data to TDengine properly, you can adapt Grafana to visualize TDengine provides two sets of Dashboard templates by default, and users only need to import the templates from the Grafana directory into Grafana to activate their use. **Figure 2. Importing Grafana Templates** -! [](/img/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.jpg "Figure 2. Importing a Grafana Template") +![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp "Figure 2. Importing a Grafana Template") After the above steps, you have completed the migration to replace OpenTSDB with TDengine. You can see that the whole process is straightforward: there is no need to write any code, and only some configuration files need to be adjusted to complete the migration.
@@ -88,7 +88,7 @@ In most DevOps scenarios, if you have a small OpenTSDB cluster (3 or fewer nodes If your application is particularly complex, or the application domain is not a DevOps scenario, you can continue reading subsequent chapters for a more comprehensive and in-depth look at the advanced topics of migrating an OpenTSDB application to TDengine. **Figure 3. System architecture after migration** -! [IT-DevOps-Solutions-Immigrate-TDengine-Arch](/img/IT-DevOps-Solutions-Immigrate-TDengine-Arch.jpg "Figure 3. System architecture after migration completion") +![TDengine Database IT-DevOps-Solutions-Immigrate-TDengine-Arch](./IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp "Figure 3. System architecture after migration completion") ## Migration evaluation and strategy for other scenarios @@ -96,7 +96,7 @@ Suppose your application is particularly complex, or the application domain is n This chapter describes the differences between OpenTSDB and TDengine at the system functionality level. After reading this chapter, you can fully evaluate whether you can migrate some complex OpenTSDB-based applications to TDengine, and what you should pay attention to after migration. -TDengine currently only supports Grafana for visual kanban rendering, so if your application uses front-end kanban boards other than Grafana (e.g., [TSDash](https://github.com/facebook/tsdash), [Status Wolf](https://github) .com/box/StatusWolf), etc.). You cannot directly migrate those front-end kanbans to TDengine, and the front-end kanban will need to be ported to Grafana to work correctly. +TDengine currently only supports Grafana for visual kanban rendering, so if your application uses front-end kanban boards other than Grafana (e.g., [TSDash](https://github.com/facebook/tsdash), [Status Wolf](https://github.com/box/StatusWolf), etc.), you cannot directly migrate those front-end kanbans to TDengine; they will need to be ported to Grafana to work correctly. TDengine version 2.3.0.x only supports collectd and StatsD as data collection aggregation software, but will support more in the future. If you use other data aggregators on the collection side, your application needs to be ported to these two data aggregation systems to write data correctly. In addition to the two data aggregator software protocols mentioned above, TDengine also supports writing data directly via InfluxDB's line protocol and OpenTSDB's data writing protocol in JSON format. You can rewrite the logic on the data push side to write data using the line protocols supported by TDengine.
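As a sketch of what such a push-side rewrite can look like: the `af` package of the Go connector (the same one used by the schemaless examples in this patch) accepts OpenTSDB telnet lines directly. The database name and sample lines below are illustrative, and the target database is assumed to exist already:

```go
package main

import (
	"fmt"

	"github.com/taosdata/driver-go/v2/af"
)

func main() {
	// Assumes a database named "test" has already been created on the server.
	conn, err := af.Open("localhost", "root", "taosdata", "test", 6030)
	if err != nil {
		fmt.Println("fail to connect, err:", err)
		return
	}
	defer conn.Close()
	// Metrics in OpenTSDB telnet format, exactly as an existing push
	// pipeline would emit them; TDengine creates the schema on the fly.
	lines := []string{
		"meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
		"meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2",
	}
	if err = conn.OpenTSDBInsertTelnetLines(lines); err != nil {
		fmt.Println("insert error:", err)
	}
}
```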
diff --git a/docs-en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp b/docs-en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp new file mode 100644 index 0000000000000000000000000000000000000000..147a65b17bff2aa0e44faa206618bdce5664e1ca Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp differ diff --git a/docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp b/docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp new file mode 100644 index 0000000000000000000000000000000000000000..3ca99c835b33df8845adf1b52d8fb8eb63076e82 Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp differ diff --git a/docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp b/docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..04811f61b9b318e129552d87cd48eabf6e99feab Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp differ diff --git a/docs-en/25-application/IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp b/docs-en/25-application/IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp new file mode 100644 index 0000000000000000000000000000000000000000..36930068758556f4de5b58321804a96401c64b22 Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp differ diff --git a/docs-en/25-application/IT-DevOps-Solutions-Telegraf.webp b/docs-en/25-application/IT-DevOps-Solutions-Telegraf.webp new file mode 100644 index 0000000000000000000000000000000000000000..fd5461ec9b37be66cac4c17fb1f81fec76158330 Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-Telegraf.webp differ diff --git a/docs-en/25-application/IT-DevOps-Solutions-collectd-dashboard.webp b/docs-en/25-application/IT-DevOps-Solutions-collectd-dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..879c27a1a5843c714ff3c33c1dccfa32a2154b82 Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-collectd-dashboard.webp differ diff --git a/docs-en/25-application/IT-DevOps-Solutions-statsd-dashboard.webp b/docs-en/25-application/IT-DevOps-Solutions-statsd-dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..1d4c655970b5f3fcb3be2d65d67eb42f08f35862 Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-statsd-dashboard.webp differ diff --git a/docs-en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp b/docs-en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..105afcdb8312b23675f62ff6339d5e737b5cd958 Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp differ diff --git a/docs-en/27-train-faq/03-docker.md b/docs-en/27-train-faq/03-docker.md index ba435a9307c1d6595579a295df83030c58ba0f22..8f27c35d7945043d39ad83626ceccee941ad135e 100644 --- a/docs-en/27-train-faq/03-docker.md +++ b/docs-en/27-train-faq/03-docker.md @@ -118,7 +118,7 @@ Output is like below: 
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2} ``` -For details of REST API please refer to [REST API]](/reference/rest-api/). +For details of REST API please refer to [REST API](/reference/rest-api/). ### Run TDengine server and taosAdapter inside container @@ -265,7 +265,7 @@ Below is an example output: $ taos> select groupid, location from test.d0; groupid | location | ================================= - 0 | shanghai | + 0 | California.SanDiego | Query OK, 1 row(s) in set (0.003490s) ``` diff --git a/docs-examples/c/async_query_example.c b/docs-examples/c/async_query_example.c index 262757f02b5c52f2d4402d363663db80bb38a54d..b370420b124a21b05f8e0b4041fb1461b1e2478a 100644 --- a/docs-examples/c/async_query_example.c +++ b/docs-examples/c/async_query_example.c @@ -182,14 +182,14 @@ int main() { // query callback ... // ts current voltage phase location groupid // numOfRow = 8 -// 1538548685000 10.300000 219 0.310000 beijing.chaoyang 2 -// 1538548695000 12.600000 218 0.330000 beijing.chaoyang 2 -// 1538548696800 12.300000 221 0.310000 beijing.chaoyang 2 -// 1538548696650 10.300000 218 0.250000 beijing.chaoyang 3 -// 1538548685500 11.800000 221 0.280000 beijing.haidian 2 -// 1538548696600 13.400000 223 0.290000 beijing.haidian 2 -// 1538548685000 10.800000 223 0.290000 beijing.haidian 3 -// 1538548686500 11.500000 221 0.350000 beijing.haidian 3 +// 1538548685500 11.800000 221 0.280000 california.losangeles 2 +// 1538548696600 13.400000 223 0.290000 california.losangeles 2 +// 1538548685000 10.800000 223 0.290000 california.losangeles 3 +// 1538548686500 11.500000 221 0.350000 california.losangeles 3 +// 1538548685000 10.300000 219 0.310000 california.sanfrancisco 2 +// 1538548695000 12.600000 218 0.330000 california.sanfrancisco 2 +// 1538548696800 12.300000 221 0.310000 california.sanfrancisco 2 +// 1538548696650 10.300000 218 0.250000 california.sanfrancisco 3 // numOfRow = 0 // no more data, close the connection. 
// ANCHOR_END: demo \ No newline at end of file diff --git a/docs-examples/c/insert_example.c b/docs-examples/c/insert_example.c index ca12be9314efbda707dbd05449c746794c209743..ce8fdc5b9372aec7b02d3c9254ec25c4c4f62adc 100644 --- a/docs-examples/c/insert_example.c +++ b/docs-examples/c/insert_example.c @@ -36,10 +36,10 @@ int main() { executeSQL(taos, "CREATE DATABASE power"); executeSQL(taos, "USE power"); executeSQL(taos, "CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)"); - executeSQL(taos, "INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)" - "d1002 USING meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)" - "d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)" - "d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"); + executeSQL(taos, "INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)" + "d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)" + "d1003 USING meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)" + "d1004 USING meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"); taos_close(taos); taos_cleanup(); } diff --git a/docs-examples/c/json_protocol_example.c b/docs-examples/c/json_protocol_example.c index 182fd201308facc80c76f36cfa57580784d70413..9d276127a64c3d74322e30587ab2e319c29cbf65 100644 --- a/docs-examples/c/json_protocol_example.c +++ b/docs-examples/c/json_protocol_example.c @@ -29,11 +29,11 @@ int main() { executeSQL(taos, "USE test"); char *line = "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": " - "\"Beijing.Chaoyang\", \"groupid\": 2}},{\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, " - "\"value\": 219, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}},{\"metric\": \"meters.current\", " - "\"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": " + "\"California.SanFrancisco\", \"groupid\": 2}},{\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, " + "\"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}},{\"metric\": \"meters.current\", " + "\"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": " "2}},{\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": " - "\"Beijing.Haidian\", \"groupid\": 1}}]"; + "\"California.LosAngeles\", \"groupid\": 1}}]"; char *lines[] = {line}; TAOS_RES *res = taos_schemaless_insert(taos, lines, 1, TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED); diff --git a/docs-examples/c/line_example.c b/docs-examples/c/line_example.c index 
8dd4b1a5075369625645959da0476b76b9fbf290..ce39f8d9df744082a450ce246529bf56adebd1e0 100644 --- a/docs-examples/c/line_example.c +++ b/docs-examples/c/line_example.c @@ -27,10 +27,10 @@ int main() { executeSQL(taos, "DROP DATABASE IF EXISTS test"); executeSQL(taos, "CREATE DATABASE test"); executeSQL(taos, "USE test"); - char *lines[] = {"meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", - "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", - "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", - "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"}; + char *lines[] = {"meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", + "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"}; TAOS_RES *res = taos_schemaless_insert(taos, lines, 4, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS); if (taos_errno(res) != 0) { printf("failed to insert schema-less data, reason: %s\n", taos_errstr(res)); diff --git a/docs-examples/c/multi_bind_example.c b/docs-examples/c/multi_bind_example.c index fe11df9caad3e216fbd0b1ff2f40a54fe3ba86e5..02e6568e9e88ac8703a4993ed406e770d23c2438 100644 --- a/docs-examples/c/multi_bind_example.c +++ b/docs-examples/c/multi_bind_example.c @@ -52,7 +52,7 @@ void insertData(TAOS *taos) { checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare"); // bind table name and tags TAOS_BIND tags[2]; - char *location = "Beijing.Chaoyang"; + char *location = "California.SanFrancisco"; int groupId = 2; tags[0].buffer_type = TSDB_DATA_TYPE_BINARY; tags[0].buffer_length = strlen(location); diff --git a/docs-examples/c/query_example.c b/docs-examples/c/query_example.c index f88b2467ceb3d9bbeaf6b3beb6a24befd3e398c6..fcae95bcd45a282eaa3ae911b4115e6300c6af8e 100644 --- a/docs-examples/c/query_example.c +++ b/docs-examples/c/query_example.c @@ -139,5 +139,5 @@ int main() { // output: // ts current voltage phase location groupid -// 1648432611249 10.300000 219 0.310000 Beijing.Chaoyang 2 -// 1648432611749 12.600000 218 0.330000 Beijing.Chaoyang 2 \ No newline at end of file +// 1648432611249 10.300000 219 0.310000 California.SanFrancisco 2 +// 1648432611749 12.600000 218 0.330000 California.SanFrancisco 2 \ No newline at end of file diff --git a/docs-examples/c/stmt_example.c b/docs-examples/c/stmt_example.c index fab1506f953ef68050e4318406fa2ba1a0202929..28dae5f9d5ea2faec0aa3c0a784d39e252651c65 100644 --- a/docs-examples/c/stmt_example.c +++ b/docs-examples/c/stmt_example.c @@ -59,7 +59,7 @@ void insertData(TAOS *taos) { checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare"); // bind table name and tags TAOS_BIND tags[2]; - char* location = "Beijing.Chaoyang"; + char* location = "California.SanFrancisco"; int groupId = 2; tags[0].buffer_type = TSDB_DATA_TYPE_BINARY; tags[0].buffer_length = strlen(location); diff --git a/docs-examples/c/telnet_line_example.c b/docs-examples/c/telnet_line_example.c index 913d433f6aec07b3bce115d45536ffa4b45a0481..da62da4ba492856b0d73a564c1bf9cdd60b5b742 100644 --- a/docs-examples/c/telnet_line_example.c +++ b/docs-examples/c/telnet_line_example.c @@ -28,14 +28,14 @@ int main() { 
executeSQL(taos, "CREATE DATABASE test"); executeSQL(taos, "USE test"); char *lines[] = { - "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3", - "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3", + "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", + "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", }; TAOS_RES *res = taos_schemaless_insert(taos, lines, 8, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED); if (taos_errno(res) != 0) { diff --git a/docs-examples/csharp/AsyncQueryExample.cs b/docs-examples/csharp/AsyncQueryExample.cs index fe30d21efe82e8d1dc414bd4723227ca93bc944f..3dabbebd1630a207af2e1b1b11cc4ba15bdd94a9 100644 --- a/docs-examples/csharp/AsyncQueryExample.cs +++ b/docs-examples/csharp/AsyncQueryExample.cs @@ -224,15 +224,15 @@ namespace TDengineExample } //output: -//Connect to TDengine success -//8 rows async retrieved - -//1538548685000 | 10.3 | 219 | 0.31 | beijing.chaoyang | 2 | -//1538548695000 | 12.6 | 218 | 0.33 | beijing.chaoyang | 2 | -//1538548696800 | 12.3 | 221 | 0.31 | beijing.chaoyang | 2 | -//1538548696650 | 10.3 | 218 | 0.25 | beijing.chaoyang | 3 | -//1538548685500 | 11.8 | 221 | 0.28 | beijing.haidian | 2 | -//1538548696600 | 13.4 | 223 | 0.29 | beijing.haidian | 2 | -//1538548685000 | 10.8 | 223 | 0.29 | beijing.haidian | 3 | -//1538548686500 | 11.5 | 221 | 0.35 | beijing.haidian | 3 | -//async retrieve complete. \ No newline at end of file +// Connect to TDengine success +// 8 rows async retrieved + +// 1538548685500 | 11.8 | 221 | 0.28 | california.losangeles | 2 | +// 1538548696600 | 13.4 | 223 | 0.29 | california.losangeles | 2 | +// 1538548685000 | 10.8 | 223 | 0.29 | california.losangeles | 3 | +// 1538548686500 | 11.5 | 221 | 0.35 | california.losangeles | 3 | +// 1538548685000 | 10.3 | 219 | 0.31 | california.sanfrancisco | 2 | +// 1538548695000 | 12.6 | 218 | 0.33 | california.sanfrancisco | 2 | +// 1538548696800 | 12.3 | 221 | 0.31 | california.sanfrancisco | 2 | +// 1538548696650 | 10.3 | 218 | 0.25 | california.sanfrancisco | 3 | +// async retrieve complete. 
\ No newline at end of file diff --git a/docs-examples/csharp/InfluxDBLineExample.cs b/docs-examples/csharp/InfluxDBLineExample.cs index 7aad08825209db568d61e5963ec7a00034ab7ca7..7b4453f4ac0b14dd76d166e395bdacb46a5d3fbc 100644 --- a/docs-examples/csharp/InfluxDBLineExample.cs +++ b/docs-examples/csharp/InfluxDBLineExample.cs @@ -9,10 +9,10 @@ namespace TDengineExample IntPtr conn = GetConnection(); PrepareDatabase(conn); string[] lines = { - "meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", - "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", - "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", - "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250" + "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", + "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250" }; IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_LINE_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS); if (TDengine.ErrorNo(res) != 0) diff --git a/docs-examples/csharp/OptsJsonExample.cs b/docs-examples/csharp/OptsJsonExample.cs index d774a325afa1a8d93eb858f23dcd97dd29f8653d..2c41acc5c9628befda7eb4ad5c30af5b921de948 100644 --- a/docs-examples/csharp/OptsJsonExample.cs +++ b/docs-examples/csharp/OptsJsonExample.cs @@ -8,10 +8,10 @@ namespace TDengineExample { IntPtr conn = GetConnection(); PrepareDatabase(conn); - string[] lines = { "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": 2}}," + - " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}}, " + - "{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": 2}}," + - " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}}]" + string[] lines = { "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," + + " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}, " + + "{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," + + " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}]" }; IntPtr res = TDengine.SchemalessInsert(conn, lines, 1, (int)TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED); diff --git a/docs-examples/csharp/OptsTelnetExample.cs b/docs-examples/csharp/OptsTelnetExample.cs index 81608c32213fa0618a2ca6e0769aacf8e9c8e64d..bb752db1afbbb2ef68df9ca25314c8b91cd9a266 100644 --- a/docs-examples/csharp/OptsTelnetExample.cs +++ 
b/docs-examples/csharp/OptsTelnetExample.cs @@ -9,14 +9,14 @@ namespace TDengineExample IntPtr conn = GetConnection(); PrepareDatabase(conn); string[] lines = { - "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3", - "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3", + "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", + "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", }; IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED); if (TDengine.ErrorNo(res) != 0) diff --git a/docs-examples/csharp/QueryExample.cs b/docs-examples/csharp/QueryExample.cs index f00e391100c7ce42177e2987f5b0b32dc02262c4..97f0c456d412e2ed608c345ba87469d3f5ccfc15 100644 --- a/docs-examples/csharp/QueryExample.cs +++ b/docs-examples/csharp/QueryExample.cs @@ -158,5 +158,5 @@ namespace TDengineExample // Connect to TDengine success // fieldCount=6 // ts current voltage phase location groupid -// 1648432611249 10.3 219 0.31 Beijing.Chaoyang 2 -// 1648432611749 12.6 218 0.33 Beijing.Chaoyang 2 \ No newline at end of file +// 1648432611249 10.3 219 0.31 California.SanFrancisco 2 +// 1648432611749 12.6 218 0.33 California.SanFrancisco 2 \ No newline at end of file diff --git a/docs-examples/csharp/SQLInsertExample.cs b/docs-examples/csharp/SQLInsertExample.cs index fa2e2a50daf06f4d948479e7f5b0df82c517f809..d5462c1062e01fd5c93bac983696d0350117ad92 100644 --- a/docs-examples/csharp/SQLInsertExample.cs +++ b/docs-examples/csharp/SQLInsertExample.cs @@ -15,10 +15,10 @@ namespace TDengineExample CheckRes(conn, res, "failed to change database"); res = TDengine.Query(conn, "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)"); CheckRes(conn, res, "failed to create stable"); - var sql = "INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) " + - "d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) " + - "d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " + - "d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 
11.50000, 221, 0.35000)"; + var sql = "INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) " + + "d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) " + + "d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " + + "d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; res = TDengine.Query(conn, sql); CheckRes(conn, res, "failed to insert data"); int affectedRows = TDengine.AffectRows(res); diff --git a/docs-examples/csharp/StmtInsertExample.cs b/docs-examples/csharp/StmtInsertExample.cs index d6e00dd4ac54ab8dbfc33b93896d19fc585e7642..6ade424b95d64529b7a40a782de13e3106d0c78a 100644 --- a/docs-examples/csharp/StmtInsertExample.cs +++ b/docs-examples/csharp/StmtInsertExample.cs @@ -21,7 +21,7 @@ namespace TDengineExample CheckStmtRes(res, "failed to prepare stmt"); // 2. bind table name and tags - TAOS_BIND[] tags = new TAOS_BIND[2] { TaosBind.BindBinary("Beijing.Chaoyang"), TaosBind.BindInt(2) }; + TAOS_BIND[] tags = new TAOS_BIND[2] { TaosBind.BindBinary("California.SanFrancisco"), TaosBind.BindInt(2) }; res = TDengine.StmtSetTbnameTags(stmt, "d1001", tags); CheckStmtRes(res, "failed to bind table name and tags"); diff --git a/docs-examples/go/connect/cgoexample/main.go b/docs-examples/go/connect/cgoexample/main.go index 8b9aba4ce4217c00605bc8796c788f3dd52805e6..ba7ed0f728a1cd546dbc3199ce4c0dc854ebee91 100644 --- a/docs-examples/go/connect/cgoexample/main.go +++ b/docs-examples/go/connect/cgoexample/main.go @@ -20,4 +20,4 @@ func main() { // use // var taosDSN = "root:taosdata@tcp(localhost:6030)/dbName" -// if you want to connect to a default database. +// if you want to connect a specified database named "dbName". diff --git a/docs-examples/go/connect/restexample/main.go b/docs-examples/go/connect/restexample/main.go index 9c05e7eed80dee4ae7e6b20637d265f388d7438d..1efc98b988c183c4c680884057bf2a72a9dd19e9 100644 --- a/docs-examples/go/connect/restexample/main.go +++ b/docs-examples/go/connect/restexample/main.go @@ -18,6 +18,6 @@ func main() { defer taos.Close() } -// use +// use // var taosDSN = "root:taosdata@http(localhost:6041)/dbName" -// if you want to connect to a default database. +// if you want to connect a specified database named "dbName". 
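For completeness, a short sketch of the REST variant described by the comment above, assuming taosAdapter is listening on port 6041 and a database named "dbName" exists; unlike the cgo example, no native client library is required:

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/taosdata/driver-go/v2/taosRestful"
)

func main() {
	// REST DSN form from the comment above; all traffic goes over HTTP.
	taos, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/dbName")
	if err != nil {
		fmt.Println("failed to connect TDengine, err:", err)
		return
	}
	defer taos.Close()
	if err = taos.Ping(); err != nil {
		fmt.Println("ping failed, err:", err)
	}
}
```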
diff --git a/docs-examples/go/insert/json/main.go b/docs-examples/go/insert/json/main.go index 47d9e9984adc05896fb9954ad3deffde3764b836..6be375270e32a5091c015f88de52c9dda2246b59 100644 --- a/docs-examples/go/insert/json/main.go +++ b/docs-examples/go/insert/json/main.go @@ -25,10 +25,10 @@ func main() { defer conn.Close() prepareDatabase(conn) - payload := `[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}}, - {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "Beijing.Haidian", "groupid": 1}}, - {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}}, - {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "Beijing.Haidian", "groupid": 1}}]` + payload := `[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, + {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "California.LosAngeles", "groupid": 1}}, + {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, + {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]` err = conn.OpenTSDBInsertJsonPayload(payload) if err != nil { diff --git a/docs-examples/go/insert/line/main.go b/docs-examples/go/insert/line/main.go index bbc41468fe5f13d3e6f896445bb88f3eba584d0f..c17e1a5270850e6a8b497e0dbec4ae714ee1e2d6 100644 --- a/docs-examples/go/insert/line/main.go +++ b/docs-examples/go/insert/line/main.go @@ -25,10 +25,10 @@ func main() { defer conn.Close() prepareDatabase(conn) var lines = []string{ - "meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", - "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", - "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", - "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250", + "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", + "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250", } err = conn.InfluxDBInsertLines(lines, "ms") diff --git a/docs-examples/go/insert/sql/main.go b/docs-examples/go/insert/sql/main.go index 91386855334c1930af721e0b4f43395c6a6d8e82..6cd5f860e65f4fffd139668f69cc1772f5310eae 100644 --- a/docs-examples/go/insert/sql/main.go +++ b/docs-examples/go/insert/sql/main.go @@ -19,10 +19,10 @@ func createStable(taos *sql.DB) { } func insertData(taos *sql.DB) { - sql := `INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) - power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) - power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) 
('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) - power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)` + sql := `INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) + power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) + power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) + power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)` result, err := taos.Exec(sql) if err != nil { fmt.Println("failed to insert, err:", err) diff --git a/docs-examples/go/insert/stmt/main.go b/docs-examples/go/insert/stmt/main.go index c50200ebb427c4c64c2737cb8fe4c3d287551a34..7093fdf1e52bc5a14fc92cec995fd81e70717d9f 100644 --- a/docs-examples/go/insert/stmt/main.go +++ b/docs-examples/go/insert/stmt/main.go @@ -37,7 +37,7 @@ func main() { checkErr(err, "failed to create prepare statement") // bind table name and tags - tagParams := param.NewParam(2).AddBinary([]byte("Beijing.Chaoyang")).AddInt(2) + tagParams := param.NewParam(2).AddBinary([]byte("California.SanFrancisco")).AddInt(2) err = stmt.SetTableNameWithTags("d1001", tagParams) checkErr(err, "failed to execute SetTableNameWithTags") diff --git a/docs-examples/go/insert/telnet/main.go b/docs-examples/go/insert/telnet/main.go index 879e6d5cece74fd0b7c815dd34614dca3c9d4544..91fafbe71adbf60d9341b903f5a25708b7011852 100644 --- a/docs-examples/go/insert/telnet/main.go +++ b/docs-examples/go/insert/telnet/main.go @@ -25,14 +25,14 @@ func main() { defer conn.Close() prepareDatabase(conn) var lines = []string{ - "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3", - "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3", + "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", + "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", } err = conn.OpenTSDBInsertTelnetLines(lines) diff --git a/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java b/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java index 
c6ce2ef9785a010daa55ad29415f81711760cd57..84292f7e8682dbb8171c807da74a603f4ae8256e 100644 --- a/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java +++ b/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java @@ -22,4 +22,4 @@ public class JNIConnectExample { // use // String jdbcUrl = "jdbc:TAOS://localhost:6030/dbName?user=root&password=taosdata"; -// if you want to connect to a default database. \ No newline at end of file +// if you want to connect to a specified database named "dbName". \ No newline at end of file diff --git a/docs-examples/java/src/main/java/com/taos/example/JSONProtocolExample.java b/docs-examples/java/src/main/java/com/taos/example/JSONProtocolExample.java index cb83424576a4fd7dfa09ea297294ed77b66bd12d..c8e649482fbd747cdc238daa9e7a237cf63295b6 100644 --- a/docs-examples/java/src/main/java/com/taos/example/JSONProtocolExample.java +++ b/docs-examples/java/src/main/java/com/taos/example/JSONProtocolExample.java @@ -23,10 +23,10 @@ public class JSONProtocolExample { } private static String getJSONData() { - return "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": 2}}," + - " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}}, " + - "{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": 2}}," + - " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}}]"; + return "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," + + " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}, " + + "{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," + + " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}]"; } public static void main(String[] args) throws SQLException { diff --git a/docs-examples/java/src/main/java/com/taos/example/LineProtocolExample.java b/docs-examples/java/src/main/java/com/taos/example/LineProtocolExample.java index 8a2eabe0a91f7966cc3cc6b7dfeeb71b71b88d92..990922b7a516bd32a7e299f5743bd1b5e321868a 100644 --- a/docs-examples/java/src/main/java/com/taos/example/LineProtocolExample.java +++ b/docs-examples/java/src/main/java/com/taos/example/LineProtocolExample.java @@ -12,11 +12,11 @@ import java.sql.Statement; public class LineProtocolExample { // format: measurement,tag_set field_set timestamp private static String[] lines = { - "meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000", // micro + "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000", // micro // seconds - "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500", - "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300", - "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800", +
"meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800", }; private static Connection getConnection() throws SQLException { diff --git a/docs-examples/java/src/main/java/com/taos/example/RestInsertExample.java b/docs-examples/java/src/main/java/com/taos/example/RestInsertExample.java index de89f26cbe38f9343d60aeb8d3e9ce7f67c2e764..af97fe4373ca964260e5614f133f359e229b0e15 100644 --- a/docs-examples/java/src/main/java/com/taos/example/RestInsertExample.java +++ b/docs-examples/java/src/main/java/com/taos/example/RestInsertExample.java @@ -16,28 +16,28 @@ public class RestInsertExample { private static List getRawData() { return Arrays.asList( - "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,Beijing.Chaoyang,2", - "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,Beijing.Chaoyang,2", - "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,Beijing.Chaoyang,2", - "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,Beijing.Chaoyang,3", - "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,Beijing.Haidian,2", - "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,Beijing.Haidian,2", - "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,Beijing.Haidian,3", - "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,Beijing.Haidian,3" + "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2", + "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2", + "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2", + "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3", + "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2", + "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2", + "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3", + "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3" ); } /** * The generated SQL is: - * INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) - * power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) - * power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) - * power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) - * power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) - * power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) - * power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) - * power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000) + * INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) + * power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) + * power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) + * power.d1002 USING power.meters 
TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) + * power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) + * power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) + * power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) + * power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000) */ private static String getSQL() { StringBuilder sb = new StringBuilder("INSERT INTO "); diff --git a/docs-examples/java/src/main/java/com/taos/example/RestQueryExample.java b/docs-examples/java/src/main/java/com/taos/example/RestQueryExample.java index b1a1d224c6d9af2b83ac039726dcdb49a33ec2b0..a3581a1f4733e8bf3e3f561bb6cab5a725d8a1c0 100644 --- a/docs-examples/java/src/main/java/com/taos/example/RestQueryExample.java +++ b/docs-examples/java/src/main/java/com/taos/example/RestQueryExample.java @@ -51,5 +51,5 @@ public class RestQueryExample { // possible output: // avg(voltage) location -// 222.0 Beijing.Haidian -// 219.0 Beijing.Chaoyang +// 222.0 California.LosAngeles +// 219.0 California.SanFrancisco diff --git a/docs-examples/java/src/main/java/com/taos/example/StmtInsertExample.java b/docs-examples/java/src/main/java/com/taos/example/StmtInsertExample.java index 2a7ccebf41cae1a22d7516966e2c6ffb10011b64..bbcc92b22f67c31384b0fb7a082975eaac2ff2bc 100644 --- a/docs-examples/java/src/main/java/com/taos/example/StmtInsertExample.java +++ b/docs-examples/java/src/main/java/com/taos/example/StmtInsertExample.java @@ -30,14 +30,14 @@ public class StmtInsertExample { private static List<String> getRawData() { return Arrays.asList( - "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,Beijing.Chaoyang,2", - "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,Beijing.Chaoyang,2", - "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,Beijing.Chaoyang,2", - "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,Beijing.Chaoyang,3", - "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,Beijing.Haidian,2", - "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,Beijing.Haidian,2", - "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,Beijing.Haidian,3", - "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,Beijing.Haidian,3" + "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2", + "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2", + "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2", + "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3", + "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2", + "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2", + "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3", + "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3" ); } diff --git a/docs-examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java b/docs-examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java index 1431eccf16dabaac20f60ae7e971ef49707ba509..4c9368288df74f829121aeab5b925d1d083d29f0 100644 --- a/docs-examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java +++ b/docs-examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java @@ -11,14 +11,14 @@ import java.sql.Statement;
public class TelnetLineProtocolExample { // format: <metric> <timestamp> <value> <tagk_1>=<tagv_1>[ <tagk_n>=<tagv_n>] - private static String[] lines = { "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3", - "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3", + private static String[] lines = { "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", + "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", }; private static Connection getConnection() throws SQLException { diff --git a/docs-examples/java/src/test/java/com/taos/test/TestAll.java b/docs-examples/java/src/test/java/com/taos/test/TestAll.java index 92fe14a49d5f5ea5d7ea5f1d809867b3de0cc9d2..42db24485afec05298159f7b0c3a4e15835d98ed 100644 --- a/docs-examples/java/src/test/java/com/taos/test/TestAll.java +++ b/docs-examples/java/src/test/java/com/taos/test/TestAll.java @@ -23,16 +23,16 @@ public class TestAll { String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; try (Connection conn = DriverManager.getConnection(jdbcUrl)) { try (Statement stmt = conn.createStatement()) { - String sql = "INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)\n" + - " power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 15:38:15.000',12.60000,218,0.33000)\n" + - " power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 15:38:16.800',12.30000,221,0.31000)\n" + - " power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 15:38:16.650',10.30000,218,0.25000)\n" + - " power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 15:38:05.500',11.80000,221,0.28000)\n" + - " power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 15:38:16.600',13.40000,223,0.29000)\n" + - " power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 15:38:05.000',10.80000,223,0.29000)\n" + - " power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 15:38:06.000',10.80000,223,0.29000)\n" + - " power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 15:38:07.000',10.80000,223,0.29000)\n" + - " power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 15:38:08.500',11.50000,221,0.35000)"; + String sql = "INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)\n" + + " power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 15:38:15.000',12.60000,218,0.33000)\n" + + " power.d1001 USING
power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 15:38:16.800',12.30000,221,0.31000)\n" + + " power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 15:38:16.650',10.30000,218,0.25000)\n" + + " power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 15:38:05.500',11.80000,221,0.28000)\n" + + " power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 15:38:16.600',13.40000,223,0.29000)\n" + + " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:05.000',10.80000,223,0.29000)\n" + + " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:06.000',10.80000,223,0.29000)\n" + + " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:07.000',10.80000,223,0.29000)\n" + + " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:08.500',11.50000,221,0.35000)"; stmt.execute(sql); } diff --git a/docs-examples/node/nativeexample/influxdb_line_example.js b/docs-examples/node/nativeexample/influxdb_line_example.js index a9fc6d11df0b335b92bb3292baaa017cb4bc42ea..2050bee54506a3ee6fe7d89de97b3b41334dd4a6 100644 --- a/docs-examples/node/nativeexample/influxdb_line_example.js +++ b/docs-examples/node/nativeexample/influxdb_line_example.js @@ -13,10 +13,10 @@ function createDatabase() { function insertData() { const lines = [ - "meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", - "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", - "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", - "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250", + "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", + "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250", ]; cursor.schemalessInsert( lines, diff --git a/docs-examples/node/nativeexample/insert_example.js b/docs-examples/node/nativeexample/insert_example.js index 85a353f889176655654d8c39c9a905054d3b6622..ade9d83158362cbf00a856b43a973de31def7601 100644 --- a/docs-examples/node/nativeexample/insert_example.js +++ b/docs-examples/node/nativeexample/insert_example.js @@ -11,10 +11,10 @@ try { cursor.execute( "CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)" ); - var sql = `INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) -power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) -power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) -power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)`; + var sql = `INSERT INTO power.d1001 USING power.meters 
TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) +power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) +power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) +power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)`; cursor.execute(sql); } finally { cursor.close(); diff --git a/docs-examples/node/nativeexample/multi_bind_example.js b/docs-examples/node/nativeexample/multi_bind_example.js index d52581ec8e10c6edfbc8fc8f7ca78512b5c93d74..6ef8b30c097393fef8c6a2837f8683c736b363f1 100644 --- a/docs-examples/node/nativeexample/multi_bind_example.js +++ b/docs-examples/node/nativeexample/multi_bind_example.js @@ -25,7 +25,7 @@ function insertData() { // bind table name and tags let tagBind = new taos.TaosBind(2); - tagBind.bindBinary("Beijing.Chaoyang"); + tagBind.bindBinary("California.SanFrancisco"); tagBind.bindInt(2); cursor.stmtSetTbnameTags("d1001", tagBind.getBind()); diff --git a/docs-examples/node/nativeexample/opentsdb_json_example.js b/docs-examples/node/nativeexample/opentsdb_json_example.js index 6d436a8e9ebe0230bba22064e8fb6c180c14b5d1..2d78444a3f805bc77ab5e11925a28dd18fe221fe 100644 --- a/docs-examples/node/nativeexample/opentsdb_json_example.js +++ b/docs-examples/node/nativeexample/opentsdb_json_example.js @@ -17,25 +17,25 @@ function insertData() { metric: "meters.current", timestamp: 1648432611249, value: 10.3, - tags: { location: "Beijing.Chaoyang", groupid: 2 }, + tags: { location: "California.SanFrancisco", groupid: 2 }, }, { metric: "meters.voltage", timestamp: 1648432611249, value: 219, - tags: { location: "Beijing.Haidian", groupid: 1 }, + tags: { location: "California.LosAngeles", groupid: 1 }, }, { metric: "meters.current", timestamp: 1648432611250, value: 12.6, - tags: { location: "Beijing.Chaoyang", groupid: 2 }, + tags: { location: "California.SanFrancisco", groupid: 2 }, }, { metric: "meters.voltage", timestamp: 1648432611250, value: 221, - tags: { location: "Beijing.Haidian", groupid: 1 }, + tags: { location: "California.LosAngeles", groupid: 1 }, }, ]; diff --git a/docs-examples/node/nativeexample/opentsdb_telnet_example.js b/docs-examples/node/nativeexample/opentsdb_telnet_example.js index 01e79c2dcacd923cd708d1d228959a628d0ff26a..7f80f558838e18f07ad79e580e7d08638b74e940 100644 --- a/docs-examples/node/nativeexample/opentsdb_telnet_example.js +++ b/docs-examples/node/nativeexample/opentsdb_telnet_example.js @@ -13,14 +13,14 @@ function createDatabase() { function insertData() { const lines = [ - "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3", - "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3", + "meters.current 1648432611249 10.3 
location=California.SanFrancisco groupid=2", + "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", ]; cursor.schemalessInsert( lines, diff --git a/docs-examples/node/nativeexample/param_bind_example.js b/docs-examples/node/nativeexample/param_bind_example.js index 9117f46c3eeabd9009b72fa9d4a8503e65884242..c7e04c71a0d19ff8666f3d43fe09109009741266 100644 --- a/docs-examples/node/nativeexample/param_bind_example.js +++ b/docs-examples/node/nativeexample/param_bind_example.js @@ -24,7 +24,7 @@ function insertData() { // bind table name and tags let tagBind = new taos.TaosBind(2); - tagBind.bindBinary("Beijing.Chaoyang"); + tagBind.bindBinary("California.SanFrancisco"); tagBind.bindInt(2); cursor.stmtSetTbnameTags("d1001", tagBind.getBind()); diff --git a/docs-examples/php/connect.php b/docs-examples/php/connect.php index 5af77b9768e5c5ac4b774b433479a4ac8902beda..b825b447805a3923248042d2cdff79c51bdcdbe3 100644 --- a/docs-examples/php/connect.php +++ b/docs-examples/php/connect.php @@ -4,7 +4,7 @@ use TDengine\Connection; use TDengine\Exception\TDengineException; try { - // 实例化 + // instantiate $host = 'localhost'; $port = 6030; $username = 'root'; @@ -12,9 +12,9 @@ try { $dbname = null; $connection = new Connection($host, $port, $username, $password, $dbname); - // 连接 + // connect $connection->connect(); } catch (TDengineException $e) { - // 连接失败捕获异常 + // throw exception throw $e; } diff --git a/docs-examples/php/insert.php b/docs-examples/php/insert.php index 0d9cfc4843a2ec3e72d0ad128fa4c2650d6b9cf6..6e38fa0c46d31aa0a939d471ccbd255cfa453a16 100644 --- a/docs-examples/php/insert.php +++ b/docs-examples/php/insert.php @@ -4,7 +4,7 @@ use TDengine\Connection; use TDengine\Exception\TDengineException; try { - // 实例化 + // instantiate $host = 'localhost'; $port = 6030; $username = 'root'; @@ -12,22 +12,22 @@ try { $dbname = 'power'; $connection = new Connection($host, $port, $username, $password, $dbname); - // 连接 + // connect $connection->connect(); - // 插入 + // insert $connection->query('CREATE DATABASE if not exists power'); $connection->query('CREATE STABLE if not exists meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)'); $resource = $connection->query(<<<'SQL' - INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) - power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) - power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) - power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000) + INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 
14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) + power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) + power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) + power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000) SQL); - // 影响行数 + // get affected rows var_dump($resource->affectedRows()); } catch (TDengineException $e) { - // 捕获异常 + // throw exception throw $e; } diff --git a/docs-examples/php/insert_stmt.php b/docs-examples/php/insert_stmt.php index 5d4b4809d215d781807c21172982feff2171fe07..99a9a6aef3f69a8880316355e17396e06ca985c9 100644 --- a/docs-examples/php/insert_stmt.php +++ b/docs-examples/php/insert_stmt.php @@ -4,7 +4,7 @@ use TDengine\Connection; use TDengine\Exception\TDengineException; try { - // 实例化 + // instantiate $host = 'localhost'; $port = 6030; $username = 'root'; @@ -12,18 +12,18 @@ try { $dbname = 'power'; $connection = new Connection($host, $port, $username, $password, $dbname); - // 连接 + // connect $connection->connect(); - // 插入 + // insert $connection->query('CREATE DATABASE if not exists power'); $connection->query('CREATE STABLE if not exists meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)'); $stmt = $connection->prepare('INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)'); - // 设置表名和标签 + // set table name and tags $stmt->setTableNameTags('d1001', [ // supported formats are the same as for parameter binding - [TDengine\TSDB_DATA_TYPE_BINARY, 'Beijing.Chaoyang'], + [TDengine\TSDB_DATA_TYPE_BINARY, 'California.SanFrancisco'], [TDengine\TSDB_DATA_TYPE_INT, 2], ]); @@ -41,9 +41,9 @@ try { ]); $resource = $stmt->execute(); - // 影响行数 + // get affected rows var_dump($resource->affectedRows()); } catch (TDengineException $e) { - // 捕获异常 + // throw exception throw $e; } diff --git a/docs-examples/php/query.php b/docs-examples/php/query.php index 4e86a2cec7426887686049977a8647e786ac2744..2607940ea06a70eaa30e4c165c05bd72aa89857c 100644 --- a/docs-examples/php/query.php +++ b/docs-examples/php/query.php @@ -4,7 +4,7 @@ use TDengine\Connection; use TDengine\Exception\TDengineException; try { - // 实例化 + // instantiate $host = 'localhost'; $port = 6030; $username = 'root'; @@ -12,12 +12,12 @@ try { $dbname = 'power'; $connection = new Connection($host, $port, $username, $password, $dbname); - // 连接 + // connect $connection->connect(); $resource = $connection->query('SELECT ts, current FROM meters LIMIT 2'); var_dump($resource->fetch()); } catch (TDengineException $e) { - // 捕获异常 + // throw exception throw $e; } diff --git a/docs-examples/python/bind_param_example.py b/docs-examples/python/bind_param_example.py index 503a2eb5dd91a3516f87a4d3c1c3218cb6505236..6a67434f876f159cf32069a55e9527ca19034640 100644 --- a/docs-examples/python/bind_param_example.py +++ b/docs-examples/python/bind_param_example.py @@ -2,14 +2,14 @@ import taos from datetime import datetime # note: lines have already been sorted by table name -lines = [('d1001', '2018-10-03 14:38:05.000', 10.30000, 219, 0.31000, 'Beijing.Chaoyang', 2), - ('d1001', '2018-10-03 14:38:15.000', 12.60000, 218, 0.33000, 'Beijing.Chaoyang', 2), - ('d1001', '2018-10-03 14:38:16.800', 12.30000, 221, 0.31000,
'Beijing.Chaoyang', 2), - ('d1002', '2018-10-03 14:38:16.650', 10.30000, 218, 0.25000, 'Beijing.Chaoyang', 3), - ('d1003', '2018-10-03 14:38:05.500', 11.80000, 221, 0.28000, 'Beijing.Haidian', 2), - ('d1003', '2018-10-03 14:38:16.600', 13.40000, 223, 0.29000, 'Beijing.Haidian', 2), - ('d1004', '2018-10-03 14:38:05.000', 10.80000, 223, 0.29000, 'Beijing.Haidian', 3), - ('d1004', '2018-10-03 14:38:06.500', 11.50000, 221, 0.35000, 'Beijing.Haidian', 3)] +lines = [('d1001', '2018-10-03 14:38:05.000', 10.30000, 219, 0.31000, 'California.SanFrancisco', 2), + ('d1001', '2018-10-03 14:38:15.000', 12.60000, 218, 0.33000, 'California.SanFrancisco', 2), + ('d1001', '2018-10-03 14:38:16.800', 12.30000, 221, 0.31000, 'California.SanFrancisco', 2), + ('d1002', '2018-10-03 14:38:16.650', 10.30000, 218, 0.25000, 'California.SanFrancisco', 3), + ('d1003', '2018-10-03 14:38:05.500', 11.80000, 221, 0.28000, 'California.LosAngeles', 2), + ('d1003', '2018-10-03 14:38:16.600', 13.40000, 223, 0.29000, 'California.LosAngeles', 2), + ('d1004', '2018-10-03 14:38:05.000', 10.80000, 223, 0.29000, 'California.LosAngeles', 3), + ('d1004', '2018-10-03 14:38:06.500', 11.50000, 221, 0.35000, 'California.LosAngeles', 3)] def get_ts(ts: str): diff --git a/docs-examples/python/conn_native_pandas.py b/docs-examples/python/conn_native_pandas.py index 314759f7662c7bf4c9df2c8b3396ad3101c91cd4..56942ef57085766cd128b03cabb7a357587eab16 100644 --- a/docs-examples/python/conn_native_pandas.py +++ b/docs-examples/python/conn_native_pandas.py @@ -13,7 +13,7 @@ print(df.head(3)) # output: # RangeIndex(start=0, stop=8, step=1) # -# ts current voltage phase location groupid -# 0 2018-10-03 14:38:05.000 10.3 219 0.31 beijing.chaoyang 2 -# 1 2018-10-03 14:38:15.000 12.6 218 0.33 beijing.chaoyang 2 -# 2 2018-10-03 14:38:16.800 12.3 221 0.31 beijing.chaoyang 2 +# ts current ... location groupid +# 0 2018-10-03 14:38:05.500 11.8 ... california.losangeles 2 +# 1 2018-10-03 14:38:16.600 13.4 ... california.losangeles 2 +# 2 2018-10-03 14:38:05.000 10.8 ... california.losangeles 3 diff --git a/docs-examples/python/conn_rest_pandas.py b/docs-examples/python/conn_rest_pandas.py index 143e4275fa4eda685766297e4b90cba3935a574d..0164080cd5a05e72dce40b1d111ea423623ff9b2 100644 --- a/docs-examples/python/conn_rest_pandas.py +++ b/docs-examples/python/conn_rest_pandas.py @@ -11,9 +11,9 @@ print(type(df.ts[0])) print(df.head(3)) # output: -# # RangeIndex(start=0, stop=8, step=1) -# ts current ... location groupid -# 0 2018-10-03 14:38:05+08:00 10.3 ... beijing.chaoyang 2 -# 1 2018-10-03 14:38:15+08:00 12.6 ... beijing.chaoyang 2 -# 2 2018-10-03 14:38:16.800000+08:00 12.3 ... beijing.chaoyang 2 +# +# ts current ... location groupid +# 0 2018-10-03 06:38:05.500000+00:00 11.8 ... california.losangeles 2 +# 1 2018-10-03 06:38:16.600000+00:00 13.4 ... california.losangeles 2 +# 2 2018-10-03 06:38:05+00:00 10.8 ... 
california.losangeles 3 diff --git a/docs-examples/python/connect_rest_examples.py b/docs-examples/python/connect_rest_examples.py index a043d506b965bc31179dbb6f38749d196ab338ff..3303eb0e194ac28e9486ab153183c3b1f0b639f2 100644 --- a/docs-examples/python/connect_rest_examples.py +++ b/docs-examples/python/connect_rest_examples.py @@ -16,10 +16,10 @@ cursor.execute("CREATE DATABASE power") cursor.execute("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)") # insert data -cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) - power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) - power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) - power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""") +cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) + power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) + power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) + power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""") print("inserted row count:", cursor.rowcount) # query data @@ -38,8 +38,7 @@ for row in data: # inserted row count: 8 # queried row count: 3 # ['ts', 'current', 'voltage', 'phase', 'location', 'groupid'] -# [datetime.datetime(2018, 10, 3, 14, 38, 5, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 10.3, 219, 0.31, 'beijing.chaoyang', 2] -# [datetime.datetime(2018, 10, 3, 14, 38, 15, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 12.6, 218, 0.33, 'beijing.chaoyang', 2] -# [datetime.datetime(2018, 10, 3, 14, 38, 16, 800000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 12.3, 221, 0.31, 'beijing.chaoyang', 2] - +# [datetime.datetime(2018, 10, 3, 14, 38, 5, 500000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 11.8, 221, 0.28, 'california.losangeles', 2] +# [datetime.datetime(2018, 10, 3, 14, 38, 16, 600000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 13.4, 223, 0.29, 'california.losangeles', 2] +# [datetime.datetime(2018, 10, 3, 14, 38, 5, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 10.8, 223, 0.29, 'california.losangeles', 3] # ANCHOR_END: basic diff --git a/docs-examples/python/json_protocol_example.py b/docs-examples/python/json_protocol_example.py index 5bb4d629bccf3d79e74b381d6259de86d6522315..58b38f3ff667bcbbd902434d3409441a4d2c5b45 100644 --- a/docs-examples/python/json_protocol_example.py +++ b/docs-examples/python/json_protocol_example.py @@ -3,12 +3,12 @@ import json import 
taos from taos import SmlProtocol, SmlPrecision -lines = [{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}}, +lines = [{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, - "tags": {"location": "Beijing.Haidian", "groupid": 1}}, + "tags": {"location": "California.LosAngeles", "groupid": 1}}, {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, - "tags": {"location": "Beijing.Chaoyang", "groupid": 2}}, - {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "Beijing.Haidian", "groupid": 1}}] + "tags": {"location": "California.SanFrancisco", "groupid": 2}}, + {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}] def get_connection(): diff --git a/docs-examples/python/line_protocol_example.py b/docs-examples/python/line_protocol_example.py index 02baeb2104f9f48984b4d34afb5e67af641d4e32..735e8e7eb8aed1a8133de7a6de50bd50d076c472 100644 --- a/docs-examples/python/line_protocol_example.py +++ b/docs-examples/python/line_protocol_example.py @@ -1,10 +1,10 @@ import taos from taos import SmlProtocol, SmlPrecision -lines = ["meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000", - "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500", - "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300", - "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800", +lines = ["meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000", + "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800", ] diff --git a/docs-examples/python/multi_bind_example.py b/docs-examples/python/multi_bind_example.py index 1714121d72705ab8d619a41f3463af4aa3193871..205ba69fb267ae1781415e4f0995b41f908ceb17 100644 --- a/docs-examples/python/multi_bind_example.py +++ b/docs-examples/python/multi_bind_example.py @@ -3,10 +3,10 @@ from datetime import datetime # ANCHOR: bind_batch table_tags = { - "d1001": ('Beijing.Chaoyang', 2), - "d1002": ('Beijing.Chaoyang', 3), - "d1003": ('Beijing.Haidian', 2), - "d1004": ('Beijing.Haidian', 3) + "d1001": ('California.SanFrancisco', 2), + "d1002": ('California.SanFrancisco', 3), + "d1003": ('California.LosAngeles', 2), + "d1004": ('California.LosAngeles', 3) } table_values = { diff --git a/docs-examples/python/native_insert_example.py b/docs-examples/python/native_insert_example.py index 94d4888a8f5330b9e39d5ae051fcb68f9825505f..3b6b73cb2236c8d9d11019349f99f79135a5c1d6 100644 --- a/docs-examples/python/native_insert_example.py +++ b/docs-examples/python/native_insert_example.py @@ -1,13 +1,13 @@ import taos -lines = ["d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,Beijing.Chaoyang,2", - "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,Beijing.Haidian,3", - "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,Beijing.Haidian,2", - "d1004,2018-10-03 
14:38:06.500,11.50000,221,0.35000,Beijing.Haidian,3", - "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,Beijing.Chaoyang,3", - "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,Beijing.Chaoyang,2", - "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,Beijing.Chaoyang,2", - "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,Beijing.Haidian,2"] +lines = ["d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2", + "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3", + "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2", + "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3", + "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3", + "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2", + "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2", + "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2"] def get_connection() -> taos.TaosConnection: @@ -25,10 +25,10 @@ def create_stable(conn: taos.TaosConnection): # The generated SQL is: -# INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) -# d1002 USING meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) -# d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) -# d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000) +# INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) +# d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) +# d1003 USING meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) +# d1004 USING meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000) def get_sql(): global lines diff --git a/docs-examples/python/query_example.py b/docs-examples/python/query_example.py index 6d33c49c968d9210b475931b5d8cecca0ceff3e3..8afd7f07358d7e9c9a3677ee04f8eb92aae6856b 100644 --- a/docs-examples/python/query_example.py +++ b/docs-examples/python/query_example.py @@ -12,10 +12,10 @@ def query_api_demo(conn: taos.TaosConnection): # field count: 7 -# meta of files[1]: {name: ts, type: 9, bytes: 8} +# meta of fields[1]: {name: ts, type: 9, bytes: 8} # ======================Iterate on result========================= -# ('d1001', datetime.datetime(2018, 10, 3, 14, 38, 5), 10.300000190734863, 219, 0.3100000023841858, 'Beijing.Chaoyang', 2) -# ('d1001', datetime.datetime(2018, 10, 3, 14, 38, 15), 12.600000381469727, 218, 0.33000001311302185, 'Beijing.Chaoyang', 2) +# ('d1003', datetime.datetime(2018, 10, 3, 14, 38, 5, 500000), 11.800000190734863, 221, 0.2800000011920929, 'california.losangeles', 2) +# ('d1003', datetime.datetime(2018, 10, 3, 14, 38, 16, 600000), 13.399999618530273, 223, 0.28999999165534973, 'california.losangeles', 2) # 
ANCHOR_END: iter # ANCHOR: fetch_all @@ -29,8 +29,8 @@ def fetch_all_demo(conn: taos.TaosConnection): # row count: 2 # ===============all data=================== -# [{'ts': datetime.datetime(2018, 10, 3, 14, 38, 5), 'current': 10.300000190734863}, -# {'ts': datetime.datetime(2018, 10, 3, 14, 38, 15), 'current': 12.600000381469727}] +# [{'ts': datetime.datetime(2018, 10, 3, 14, 38, 5, 500000), 'current': 11.800000190734863}, +# {'ts': datetime.datetime(2018, 10, 3, 14, 38, 16, 600000), 'current': 13.399999618530273}] # ANCHOR_END: fetch_all if __name__ == '__main__': diff --git a/docs-examples/python/telnet_line_protocol_example.py b/docs-examples/python/telnet_line_protocol_example.py index 072835109ee238940e6fe5880b72b2b04e0157fa..d812e186af86be6811ee7774f10458e46df1f39f 100644 --- a/docs-examples/python/telnet_line_protocol_example.py +++ b/docs-examples/python/telnet_line_protocol_example.py @@ -2,14 +2,14 @@ import taos from taos import SmlProtocol, SmlPrecision # format: <metric> <timestamp> <value> <tagk_1>=<tagv_1>[ <tagk_n>=<tagv_n>] -lines = ["meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3", - "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3", +lines = ["meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", + "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", ] diff --git a/docs-examples/rust/nativeexample/examples/stmt_example.rs b/docs-examples/rust/nativeexample/examples/stmt_example.rs index a791a4135984a33dded145e8175d7ade57de8d77..190f8c1ef6d50a8e9c925178c1a9d31c22e3d4df 100644 --- a/docs-examples/rust/nativeexample/examples/stmt_example.rs +++ b/docs-examples/rust/nativeexample/examples/stmt_example.rs @@ -12,7 +12,7 @@ async fn main() -> Result<(), Error> { stmt.set_tbname_tags( "d1001", [ - Field::Binary(BString::from("Beijing.Chaoyang")), + Field::Binary(BString::from("California.SanFrancisco")), Field::Int(2), ], )?; diff --git a/docs-examples/rust/restexample/examples/insert_example.rs b/docs-examples/rust/restexample/examples/insert_example.rs index d7acc98d096fb3cd6bea22d6c5f6f0f5caea50af..9261536f627c297fc707708f88f57eed647dbf3e 100644 --- a/docs-examples/rust/restexample/examples/insert_example.rs +++ b/docs-examples/rust/restexample/examples/insert_example.rs @@ -5,10 +5,10 @@ async fn main() -> Result<(), Error> { let taos = TaosCfg::default().connect().expect("fail to connect"); taos.create_database("power").await?; taos.exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)").await?; - let sql = "INSERT INTO power.d1001 USING power.meters
TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) - power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) - power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) - power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; + let sql = "INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) + power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) + power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) + power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; let result = taos.query(sql).await?; println!("{:?}", result); Ok(()) diff --git a/docs-examples/rust/schemalessexample/examples/influxdb_line_example.rs b/docs-examples/rust/schemalessexample/examples/influxdb_line_example.rs index e93888cc83d12f3bec7370a66e8a85d38cec42ad..64d1a3c9ac6037c16e3e1c3be0258e19cce632a0 100644 --- a/docs-examples/rust/schemalessexample/examples/influxdb_line_example.rs +++ b/docs-examples/rust/schemalessexample/examples/influxdb_line_example.rs @@ -5,10 +5,10 @@ fn main() { let taos = TaosCfg::default().connect().expect("fail to connect"); taos.raw_query("CREATE DATABASE test").unwrap(); taos.raw_query("USE test").unwrap(); - let lines = ["meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", - "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", - "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", - "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"]; + let lines = ["meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", + "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"]; let affected_rows = taos .schemaless_insert( &lines, diff --git a/docs-examples/rust/schemalessexample/examples/opentsdb_json_example.rs b/docs-examples/rust/schemalessexample/examples/opentsdb_json_example.rs index 1d66bd1f2b1bcbe82dc3ee3e8e25ea4c521c81f0..e61691596704c8aaf979081429802df6e5aa86f9 100644 --- a/docs-examples/rust/schemalessexample/examples/opentsdb_json_example.rs +++ b/docs-examples/rust/schemalessexample/examples/opentsdb_json_example.rs @@ -6,10 +6,10 @@ fn main() { taos.raw_query("CREATE DATABASE test").unwrap(); taos.raw_query("USE test").unwrap(); let lines = [ - r#"[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": 
"Beijing.Chaoyang", "groupid": 2}}, - {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "Beijing.Haidian", "groupid": 1}}, - {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}}, - {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "Beijing.Haidian", "groupid": 1}}]"#, + r#"[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, + {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "California.LosAngeles", "groupid": 1}}, + {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, + {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]"#, ]; let affected_rows = taos diff --git a/docs-examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs b/docs-examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs index 18d7500714d9e41b1bebd490199d296ead3dc7c4..c8cab7655a24806e5c7659af80e83da383539c55 100644 --- a/docs-examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs +++ b/docs-examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs @@ -6,14 +6,14 @@ fn main() { taos.raw_query("CREATE DATABASE test").unwrap(); taos.raw_query("USE test").unwrap(); let lines = [ - "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3", - "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3", + "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", + "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", ]; let affected_rows = taos .schemaless_insert( diff --git a/example/src/tstream.c b/example/src/tstream.c index 537bfebededab2d807b2df1a73a6c53ed98a96dd..97ff2886fcb95fcfaca19ba4baf70b64160855ca 100644 --- a/example/src/tstream.c +++ b/example/src/tstream.c @@ -82,9 +82,7 @@ int32_t create_stream() { /*const char* sql = "select sum(k) from tu1 interval(10m)";*/ /*pRes = tmq_create_stream(pConn, "stream1", "out1", sql);*/ pRes = taos_query( - pConn, - "create stream stream1 trigger at_once into outstb as select _wstartts, min(k), max(k), sum(k) as sum_of_k " - "from tu1 interval(10m)"); + pConn, "create stream stream1 trigger at_once into outstb as select _wstartts, sum(k) from tu1 interval(10m)"); if (taos_errno(pRes) != 0) { printf("failed to create 
stream stream1, reason:%s\n", taos_errstr(pRes)); return -1; diff --git a/examples/c/stream.c b/examples/c/stream.c deleted file mode 100644 index 41365813aeecc042d736fab8694642937abd27e4..0000000000000000000000000000000000000000 --- a/examples/c/stream.c +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <unistd.h> -#include <pthread.h> -#include "../../../include/client/taos.h" // include TDengine header file - -typedef struct { - char server_ip[64]; - char db_name[64]; - char tbl_name[64]; -} param; - -int g_thread_exit_flag = 0; -void* insert_rows(void *sarg); - -void streamCallBack(void *param, TAOS_RES *res, TAOS_ROW row) -{ - // in this simple demo, it just print out the result - char temp[128]; - - TAOS_FIELD *fields = taos_fetch_fields(res); - int numFields = taos_num_fields(res); - - taos_print_row(temp, row, fields, numFields); - - printf("\n%s\n", temp); -} - -int main(int argc, char *argv[]) -{ - TAOS *taos; - char db_name[64]; - char tbl_name[64]; - char sql[1024] = { 0 }; - - if (argc != 4) { - printf("usage: %s server-ip dbname tblname\n", argv[0]); - exit(0); - } - - strcpy(db_name, argv[2]); - strcpy(tbl_name, argv[3]); - - // create pthread to insert into row per second for stream calc - param *t_param = (param *)malloc(sizeof(param)); - if (NULL == t_param) - { - printf("failed to malloc\n"); - exit(1); - } - memset(t_param, 0, sizeof(param)); - strcpy(t_param->server_ip, argv[1]); - strcpy(t_param->db_name, db_name); - strcpy(t_param->tbl_name, tbl_name); - - pthread_t pid; - pthread_create(&pid, NULL, (void * (*)(void *))insert_rows, t_param); - - sleep(3); // waiting for database is created.
- // open connection to database - taos = taos_connect(argv[1], "root", "taosdata", db_name, 0); - if (taos == NULL) { - printf("failed to connet to server:%s\n", argv[1]); - free(t_param); - exit(1); - } - - // starting stream calc, - printf("please input stream SQL:[e.g., select count(*) from tblname interval(5s) sliding(2s);]\n"); - fgets(sql, sizeof(sql), stdin); - if (sql[0] == 0) { - printf("input NULL stream SQL, so exit!\n"); - free(t_param); - exit(1); - } - - // param is set to NULL in this demo, it shall be set to the pointer to app context - TAOS_STREAM *pStream = taos_open_stream(taos, sql, streamCallBack, 0, NULL, NULL); - if (NULL == pStream) { - printf("failed to create stream\n"); - free(t_param); - exit(1); - } - - printf("presss any key to exit\n"); - getchar(); - - taos_close_stream(pStream); - - g_thread_exit_flag = 1; - pthread_join(pid, NULL); - - taos_close(taos); - free(t_param); - - return 0; -} - - -void* insert_rows(void *sarg) -{ - TAOS *taos; - char command[1024] = { 0 }; - param *winfo = (param * )sarg; - - if (NULL == winfo){ - printf("para is null!\n"); - exit(1); - } - - taos = taos_connect(winfo->server_ip, "root", "taosdata", NULL, 0); - if (taos == NULL) { - printf("failed to connet to server:%s\n", winfo->server_ip); - exit(1); - } - - // drop database - sprintf(command, "drop database %s;", winfo->db_name); - if (taos_query(taos, command) != 0) { - printf("failed to drop database, reason:%s\n", taos_errstr(taos)); - exit(1); - } - - // create database - sprintf(command, "create database %s;", winfo->db_name); - if (taos_query(taos, command) != 0) { - printf("failed to create database, reason:%s\n", taos_errstr(taos)); - exit(1); - } - - // use database - sprintf(command, "use %s;", winfo->db_name); - if (taos_query(taos, command) != 0) { - printf("failed to use database, reason:%s\n", taos_errstr(taos)); - exit(1); - } - - // create table - sprintf(command, "create table %s (ts timestamp, speed int);", winfo->tbl_name); - if (taos_query(taos, command) != 0) { - printf("failed to create table, reason:%s\n", taos_errstr(taos)); - exit(1); - } - - // insert data - int64_t begin = (int64_t)time(NULL); - int index = 0; - while (1) { - if (g_thread_exit_flag) break; - - index++; - sprintf(command, "insert into %s values (%ld, %d)", winfo->tbl_name, (begin + index) * 1000, index); - if (taos_query(taos, command)) { - printf("failed to insert row [%s], reason:%s\n", command, taos_errstr(taos)); - } - sleep(1); - } - - taos_close(taos); - return 0; -} - diff --git a/include/common/systable.h b/include/common/systable.h index e36beb13f2eb2cae1ec93495c3b84550fce617ce..8b0bb4a3fba107e1d74bee6885c39ae06d425a19 100644 --- a/include/common/systable.h +++ b/include/common/systable.h @@ -34,7 +34,6 @@ extern "C" { #define TSDB_INS_TABLE_USER_FUNCTIONS "user_functions" #define TSDB_INS_TABLE_USER_INDEXES "user_indexes" #define TSDB_INS_TABLE_USER_STABLES "user_stables" -#define TSDB_INS_TABLE_USER_STREAMS "user_streams" #define TSDB_INS_TABLE_USER_TABLES "user_tables" #define TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED "user_table_distributed" #define TSDB_INS_TABLE_USER_USERS "user_users" diff --git a/include/common/taosdef.h b/include/common/taosdef.h index 72d2c142d2dafd55fac72ae8f0b0b58aad69b40d..d39c7a121593e6feeb5cfbf104d07642bdbfaff7 100644 --- a/include/common/taosdef.h +++ b/include/common/taosdef.h @@ -86,11 +86,17 @@ typedef enum { TSDB_RETENTION_MAX = 3 } ERetentionLevel; +typedef enum { + TSDB_BITMODE_DEFAULT = 0, // 2 bits + TSDB_BITMODE_ONE_BIT = 1, // 1 bit 
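// EBitmapMode gives names to the two null-bitmap encodings that were previously
// raw 0/1 literals ("2 bits" vs "1 bit" per cell, as annotated above); the
// tdDataColsIsBitmapI()/tdDataColsSetBitmapI() helpers in tdataformat.h below
// are rewritten in terms of these constants.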
+} EBitmapMode; + extern char *qtypeStr[]; #define TSDB_PORT_HTTP 11 #undef TD_DEBUG_PRINT_ROW +#undef TD_DEBUG_PRINT_TSDB_LOAD_DCOLS #ifdef __cplusplus } diff --git a/include/common/tcommon.h b/include/common/tcommon.h index 9e3ad42a82fe779bc507417d84718b342a98a34e..88fa0e728f397006759e296cf1e3533816ee540f 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -53,10 +53,9 @@ typedef enum EStreamType { } EStreamType; typedef struct { - uint32_t numOfTables; - SArray* pGroupList; + SArray* pTableList; SHashObj* map; // speedup acquire the tableQueryInfo by table uid -} STableGroupInfo; +} STableListInfo; typedef struct SColumnDataAgg { int16_t colId; @@ -106,12 +105,14 @@ typedef struct SColumnInfoData { } SColumnInfoData; typedef struct SQueryTableDataCond { - STimeWindow twindow; + //STimeWindow twindow; int32_t order; // desc|asc order to iterate the data block int32_t numOfCols; SColumnInfo *colList; bool loadExternalRows; // load external rows or not int32_t type; // data block load type: + int32_t numOfTWindows; + STimeWindow *twindows; } SQueryTableDataCond; void* blockDataDestroy(SSDataBlock* pBlock); @@ -219,6 +220,16 @@ typedef struct { #define GET_FORWARD_DIRECTION_FACTOR(ord) (((ord) == TSDB_ORDER_ASC) ? QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP) +#define SORT_QSORT_T 0x1 +#define SORT_SPILLED_MERGE_SORT_T 0x2 +typedef struct SSortExecInfo { + int32_t sortMethod; + int32_t sortBuffer; + int32_t loops; // loop count + int32_t writeBytes; // write io bytes + int32_t readBytes; // read io bytes +} SSortExecInfo; + #ifdef __cplusplus } #endif diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index db8644ecfed50f354e61ff20b424f93dc559f8d7..66b81efc5b32b961de01fce1dbe5a5a6cee808ef 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -198,7 +198,7 @@ void colDataTrim(SColumnInfoData* pColumnInfoData); size_t blockDataGetNumOfCols(const SSDataBlock* pBlock); size_t blockDataGetNumOfRows(const SSDataBlock* pBlock); -int32_t blockDataMerge(SSDataBlock* pDest, const SSDataBlock* pSrc, SArray* pIndexMap); +int32_t blockDataMerge(SSDataBlock* pDest, const SSDataBlock* pSrc); int32_t blockDataSplitRows(SSDataBlock* pBlock, bool hasVarCol, int32_t startIndex, int32_t* stopIndex, int32_t pageSize); int32_t blockDataToBuf(char* buf, const SSDataBlock* pBlock); @@ -227,12 +227,16 @@ int32_t blockDataTrimFirstNRows(SSDataBlock* pBlock, size_t n); SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData); +void blockCompressEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_t numOfCols, int8_t needCompress); +const char* blockCompressDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t numOfRows, const char* pData); + void blockDebugShowData(const SArray* dataBlocks); int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId, - tb_uid_t uid, tb_uid_t suid); + tb_uid_t suid); -SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pSchema, bool createTb, int64_t suid, int32_t vgId); +SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pSchema, bool createTb, int64_t suid, + const char* stbFullName, int32_t vgId); static FORCE_INLINE int32_t blockGetEncodeSize(const SSDataBlock* pBlock) { return blockDataGetSerialMetaSize(pBlock) + blockDataGetSize(pBlock); @@ -245,57 +249,8 @@ static FORCE_INLINE int32_t blockCompressColData(SColumnInfoData* pColRes, int32 colSize + COMP_OVERFLOW_BYTES, compressed, NULL, 
0); } -static FORCE_INLINE void blockCompressEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_t numOfCols, - int8_t needCompress) { - int32_t* actualLen = (int32_t*)data; - data += sizeof(int32_t); - - uint64_t* groupId = (uint64_t*)data; - data += sizeof(uint64_t); - - int32_t* colSizes = (int32_t*)data; - data += numOfCols * sizeof(int32_t); - - *dataLen = (numOfCols * sizeof(int32_t) + sizeof(uint64_t) + sizeof(int32_t)); - - int32_t numOfRows = pBlock->info.rows; - for (int32_t col = 0; col < numOfCols; ++col) { - SColumnInfoData* pColRes = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, col); - - // copy the null bitmap - if (IS_VAR_DATA_TYPE(pColRes->info.type)) { - size_t metaSize = numOfRows * sizeof(int32_t); - memcpy(data, pColRes->varmeta.offset, metaSize); - data += metaSize; - (*dataLen) += metaSize; - } else { - int32_t len = BitmapLen(numOfRows); - memcpy(data, pColRes->nullbitmap, len); - data += len; - (*dataLen) += len; - } - - if (needCompress) { - colSizes[col] = blockCompressColData(pColRes, numOfRows, data, needCompress); - data += colSizes[col]; - (*dataLen) += colSizes[col]; - } else { - colSizes[col] = colDataGetLength(pColRes, numOfRows); - (*dataLen) += colSizes[col]; - memmove(data, pColRes->pData, colSizes[col]); - data += colSizes[col]; - } - - colSizes[col] = htonl(colSizes[col]); - } - - *actualLen = *dataLen; - *groupId = pBlock->info.groupId; -} - #ifdef __cplusplus } #endif #endif /*_TD_COMMON_EP_H_*/ - diff --git a/include/common/tdataformat.h b/include/common/tdataformat.h index e13705d403f9acbd295c238af4d3fe524d3eb5ca..ef931ed3b1c52b7bdc9d12da77f3bdc8ad1f7837 100644 --- a/include/common/tdataformat.h +++ b/include/common/tdataformat.h @@ -61,9 +61,10 @@ int32_t tTSRowBuilderGetRow(STSRowBuilder *pBuilder, const STSRow2 **ppRow); // STag int32_t tTagNew(STagVal *pTagVals, int16_t nTag, STag **ppTag); void tTagFree(STag *pTag); -void tTagGet(STag *pTag, int16_t cid, int8_t type, uint8_t **ppData, int32_t *nData); -int32_t tEncodeTag(SEncoder *pEncoder, STag *pTag); -int32_t tDecodeTag(SDecoder *pDecoder, const STag **ppTag); +int32_t tTagSet(STag *pTag, SSchema *pSchema, int32_t nCols, int iCol, uint8_t *pData, uint32_t nData, STag **ppTag); +void tTagGet(STag *pTag, int16_t cid, int8_t type, uint8_t **ppData, uint32_t *nData); +int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag); +int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag); // STRUCT ================= struct STColumn { @@ -313,8 +314,9 @@ typedef struct { SDataCol *cols; } SDataCols; -static FORCE_INLINE bool tdDataColsIsBitmapI(SDataCols *pCols) { return pCols->bitmapMode != 0; } -static FORCE_INLINE void tdDataColsSetBitmapI(SDataCols *pCols) { pCols->bitmapMode = 1; } +static FORCE_INLINE bool tdDataColsIsBitmapI(SDataCols *pCols) { return pCols->bitmapMode != TSDB_BITMODE_DEFAULT; } +static FORCE_INLINE void tdDataColsSetBitmapI(SDataCols *pCols) { pCols->bitmapMode = TSDB_BITMODE_ONE_BIT; } +static FORCE_INLINE bool tdIsBitmapModeI(int8_t bitmapMode) { return bitmapMode != TSDB_BITMODE_DEFAULT; } #define keyCol(pCols) (&((pCols)->cols[0])) // Key column #define dataColsTKeyAt(pCols, idx) ((TKEY *)(keyCol(pCols)->pData))[(idx)] // the idx row of column-wised data diff --git a/include/common/tmsg.h b/include/common/tmsg.h index b89309906417a2876fba667fcaaf15be7219d398..c4abfffc615e9c7e2d054d488a27a4c5992e64f0 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -244,12 +244,12 @@ typedef struct { const void* pMsg; } SSubmitMsgIter; -int32_t 
tInitSubmitMsgIter(const SSubmitReq* pMsg, SSubmitMsgIter* pIter); +int32_t tInitSubmitMsgIter(SSubmitReq* pMsg, SSubmitMsgIter* pIter); int32_t tGetSubmitMsgNext(SSubmitMsgIter* pIter, SSubmitBlk** pPBlock); int32_t tInitSubmitBlkIter(SSubmitMsgIter* pMsgIter, SSubmitBlk* pBlock, SSubmitBlkIter* pIter); STSRow* tGetSubmitBlkNext(SSubmitBlkIter* pIter); // for debug -int32_t tPrintFixedSchemaSubmitReq(const SSubmitReq* pReq, STSchema* pSchema); +int32_t tPrintFixedSchemaSubmitReq(SSubmitReq* pReq, STSchema* pSchema); typedef struct { int32_t code; @@ -300,9 +300,7 @@ typedef struct SSchema { typedef struct { int32_t nCols; - int32_t sver; - int32_t tagVer; - int32_t colVer; + int32_t version; SSchema* pSchema; } SSchemaWrapper; @@ -310,9 +308,7 @@ static FORCE_INLINE SSchemaWrapper* tCloneSSchemaWrapper(const SSchemaWrapper* p SSchemaWrapper* pSW = (SSchemaWrapper*)taosMemoryMalloc(sizeof(SSchemaWrapper)); if (pSW == NULL) return pSW; pSW->nCols = pSchemaWrapper->nCols; - pSW->sver = pSchemaWrapper->sver; - pSW->tagVer = pSchemaWrapper->tagVer; - pSW->colVer = pSchemaWrapper->colVer; + pSW->version = pSchemaWrapper->version; pSW->pSchema = (SSchema*)taosMemoryCalloc(pSW->nCols, sizeof(SSchema)); if (pSW->pSchema == NULL) { taosMemoryFree(pSW); @@ -367,9 +363,7 @@ static FORCE_INLINE int32_t tDecodeSSchema(SDecoder* pDecoder, SSchema* pSchema) static FORCE_INLINE int32_t taosEncodeSSchemaWrapper(void** buf, const SSchemaWrapper* pSW) { int32_t tlen = 0; tlen += taosEncodeVariantI32(buf, pSW->nCols); - tlen += taosEncodeVariantI32(buf, pSW->sver); - tlen += taosEncodeVariantI32(buf, pSW->tagVer); - tlen += taosEncodeVariantI32(buf, pSW->colVer); + tlen += taosEncodeVariantI32(buf, pSW->version); for (int32_t i = 0; i < pSW->nCols; i++) { tlen += taosEncodeSSchema(buf, &pSW->pSchema[i]); } @@ -378,9 +372,7 @@ static FORCE_INLINE int32_t taosEncodeSSchemaWrapper(void** buf, const SSchemaWr static FORCE_INLINE void* taosDecodeSSchemaWrapper(const void* buf, SSchemaWrapper* pSW) { buf = taosDecodeVariantI32(buf, &pSW->nCols); - buf = taosDecodeVariantI32(buf, &pSW->sver); - buf = taosDecodeVariantI32(buf, &pSW->tagVer); - buf = taosDecodeVariantI32(buf, &pSW->colVer); + buf = taosDecodeVariantI32(buf, &pSW->version); pSW->pSchema = (SSchema*)taosMemoryCalloc(pSW->nCols, sizeof(SSchema)); if (pSW->pSchema == NULL) { return NULL; @@ -394,9 +386,7 @@ static FORCE_INLINE void* taosDecodeSSchemaWrapper(const void* buf, SSchemaWrapp static FORCE_INLINE int32_t tEncodeSSchemaWrapper(SEncoder* pEncoder, const SSchemaWrapper* pSW) { if (tEncodeI32v(pEncoder, pSW->nCols) < 0) return -1; - if (tEncodeI32v(pEncoder, pSW->sver) < 0) return -1; - if (tEncodeI32v(pEncoder, pSW->tagVer) < 0) return -1; - if (tEncodeI32v(pEncoder, pSW->colVer) < 0) return -1; + if (tEncodeI32v(pEncoder, pSW->version) < 0) return -1; for (int32_t i = 0; i < pSW->nCols; i++) { if (tEncodeSSchema(pEncoder, &pSW->pSchema[i]) < 0) return -1; } @@ -406,9 +396,7 @@ static FORCE_INLINE int32_t tEncodeSSchemaWrapper(SEncoder* pEncoder, const SSch static FORCE_INLINE int32_t tDecodeSSchemaWrapper(SDecoder* pDecoder, SSchemaWrapper* pSW) { if (tDecodeI32v(pDecoder, &pSW->nCols) < 0) return -1; - if (tDecodeI32v(pDecoder, &pSW->sver) < 0) return -1; - if (tDecodeI32v(pDecoder, &pSW->tagVer) < 0) return -1; - if (tDecodeI32v(pDecoder, &pSW->colVer) < 0) return -1; + if (tDecodeI32v(pDecoder, &pSW->version) < 0) return -1; pSW->pSchema = (SSchema*)taosMemoryCalloc(pSW->nCols, sizeof(SSchema)); if (pSW->pSchema == NULL) return -1; @@ -421,9 
+409,7 @@ static FORCE_INLINE int32_t tDecodeSSchemaWrapper(SDecoder* pDecoder, SSchemaWra static FORCE_INLINE int32_t tDecodeSSchemaWrapperEx(SDecoder* pDecoder, SSchemaWrapper* pSW) { if (tDecodeI32v(pDecoder, &pSW->nCols) < 0) return -1; - if (tDecodeI32v(pDecoder, &pSW->sver) < 0) return -1; - if (tDecodeI32v(pDecoder, &pSW->tagVer) < 0) return -1; - if (tDecodeI32v(pDecoder, &pSW->colVer) < 0) return -1; + if (tDecodeI32v(pDecoder, &pSW->version) < 0) return -1; pSW->pSchema = (SSchema*)tDecoderMalloc(pDecoder, pSW->nCols * sizeof(SSchema)); if (pSW->pSchema == NULL) return -1; @@ -469,7 +455,8 @@ int32_t tDeserializeSMDropStbReq(void* buf, int32_t bufLen, SMDropStbReq* pReq); typedef struct { char name[TSDB_TABLE_FNAME_LEN]; int8_t alterType; - int32_t verInBlock; + int32_t tagVer; + int32_t colVer; int32_t numOfFields; SArray* pFields; int32_t ttl; @@ -492,12 +479,8 @@ int32_t tDecodeSEpSet(SDecoder* pDecoder, SEpSet* pEp); int32_t taosEncodeSEpSet(void** buf, const SEpSet* pEp); void* taosDecodeSEpSet(const void* buf, SEpSet* pEp); -typedef struct { - SEpSet epSet; -} SMEpSet; - -int32_t tSerializeSMEpSet(void* buf, int32_t bufLen, SMEpSet* pReq); -int32_t tDeserializeSMEpSet(void* buf, int32_t buflen, SMEpSet* pReq); +int32_t tSerializeSEpSet(void* buf, int32_t bufLen, const SEpSet* pEpset); +int32_t tDeserializeSEpSet(void* buf, int32_t buflen, SEpSet* pEpset); typedef struct { int8_t connType; @@ -588,13 +571,6 @@ int32_t tSerializeSGetUserAuthRsp(void* buf, int32_t bufLen, SGetUserAuthRsp* pR int32_t tDeserializeSGetUserAuthRsp(void* buf, int32_t bufLen, SGetUserAuthRsp* pRsp); void tFreeSGetUserAuthRsp(SGetUserAuthRsp* pRsp); -typedef struct { - int16_t colId; // column id - int16_t colIndex; // column index in colList if it is a normal column or index in tagColList if a tag - int16_t flag; // denote if it is a tag or a normal column - char name[TSDB_DB_FNAME_LEN]; -} SColIndex; - typedef struct { int16_t lowerRelOptr; int16_t upperRelOptr; @@ -660,8 +636,7 @@ typedef struct { int32_t tz; // query client timezone char intervalUnit; char slidingUnit; - char - offsetUnit; // TODO Remove it, the offset is the number of precision tickle, and it must be a immutable duration. 
+ char offsetUnit; int8_t precision; int64_t interval; int64_t sliding; @@ -670,6 +645,9 @@ typedef struct { typedef struct { int32_t code; + char tbFName[TSDB_TABLE_FNAME_LEN]; + int32_t sversion; + int32_t tversion; } SQueryTableRsp; int32_t tSerializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp); @@ -696,6 +674,7 @@ typedef struct { int8_t replications; int8_t strict; int8_t cacheLastRow; + int8_t schemaless; int8_t ignoreExist; int32_t numOfRetensions; SArray* pRetensions; // SRetention @@ -950,6 +929,7 @@ typedef struct { int32_t numOfCores; int32_t numOfSupportVnodes; char dnodeEp[TSDB_EP_LEN]; + SMnodeLoad mload; SClusterCfg clusterCfg; SArray* pVloads; // array of SVnodeLoad } SStatusReq; @@ -1022,6 +1002,10 @@ typedef struct { SReplica replicas[TSDB_MAX_REPLICA]; int32_t numOfRetensions; SArray* pRetensions; // SRetention + + // for tsma + int8_t isTsma; + } SCreateVnodeReq; int32_t tSerializeSCreateVnodeReq(void* buf, int32_t bufLen, SCreateVnodeReq* pReq); @@ -1210,9 +1194,10 @@ typedef struct { } SRetrieveMetaTableRsp; typedef struct SExplainExecInfo { - uint64_t startupCost; - uint64_t totalCost; + double startupCost; + double totalCost; uint64_t numOfRows; + uint32_t verboseLen; void* verboseInfo; } SExplainExecInfo; @@ -1221,6 +1206,18 @@ typedef struct { SExplainExecInfo* subplanInfo; } SExplainRsp; +typedef struct STableScanAnalyzeInfo { + uint64_t totalRows; + uint64_t totalCheckedRows; + uint32_t totalBlocks; + uint32_t loadBlocks; + uint32_t loadBlockStatis; + uint32_t skipBlocks; + uint32_t filterOutBlocks; + double elapsedTime; + uint64_t filterTime; +} STableScanAnalyzeInfo; + int32_t tSerializeSExplainRsp(void* buf, int32_t bufLen, SExplainRsp* pRsp); int32_t tDeserializeSExplainRsp(void* buf, int32_t bufLen, SExplainRsp* pRsp); @@ -1260,7 +1257,6 @@ int32_t tSerializeSCreateDropMQSBNodeReq(void* buf, int32_t bufLen, SMCreateQnod int32_t tDeserializeSCreateDropMQSBNodeReq(void* buf, int32_t bufLen, SMCreateQnodeReq* pReq); typedef struct { - int32_t dnodeId; int8_t replica; SReplica replicas[TSDB_MAX_REPLICA]; } SDCreateMnodeReq, SDAlterMnodeReq; @@ -1633,8 +1629,8 @@ _err: return NULL; } -// this message is sent from mnode to mnode(read thread to write thread), so there is no need for serialization or -// deserialization +// this message is sent from mnode to mnode(read thread to write thread), +// so there is no need for serialization or deserialization typedef struct { SHashObj* rebSubHash; // SHashObj } SMqDoRebalanceMsg; @@ -1660,6 +1656,10 @@ typedef struct { int32_t tSerializeSMDropCgroupReq(void* buf, int32_t bufLen, SMDropCgroupReq* pReq); int32_t tDeserializeSMDropCgroupReq(void* buf, int32_t bufLen, SMDropCgroupReq* pReq); +typedef struct { + int8_t reserved; +} SMDropCgroupRsp; + typedef struct { char name[TSDB_TABLE_FNAME_LEN]; int8_t alterType; @@ -1697,10 +1697,10 @@ int32_t tDecodeSRSmaParam(SDecoder* pCoder, SRSmaParam* pRSmaParam); // TDMT_VND_CREATE_STB ============== typedef struct SVCreateStbReq { - const char* name; + char* name; tb_uid_t suid; int8_t rollup; - SSchemaWrapper schema; + SSchemaWrapper schemaRow; SSchemaWrapper schemaTag; SRSmaParam pRSmaParam; } SVCreateStbReq; @@ -1710,8 +1710,8 @@ int tDecodeSVCreateStbReq(SDecoder* pCoder, SVCreateStbReq* pReq); // TDMT_VND_DROP_STB ============== typedef struct SVDropStbReq { - const char* name; - tb_uid_t suid; + char* name; + tb_uid_t suid; } SVDropStbReq; int32_t tEncodeSVDropStbReq(SEncoder* pCoder, const SVDropStbReq* pReq); @@ -1720,19 +1720,19 @@ int32_t 
tDecodeSVDropStbReq(SDecoder* pCoder, SVDropStbReq* pReq); // TDMT_VND_CREATE_TABLE ============== #define TD_CREATE_IF_NOT_EXISTS 0x1 typedef struct SVCreateTbReq { - int32_t flags; - tb_uid_t uid; - int64_t ctime; - const char* name; - int32_t ttl; - int8_t type; + int32_t flags; + char* name; + tb_uid_t uid; + int64_t ctime; + int32_t ttl; + int8_t type; union { struct { - tb_uid_t suid; - const uint8_t* pTag; + tb_uid_t suid; + uint8_t* pTag; } ctb; struct { - SSchemaWrapper schema; + SSchemaWrapper schemaRow; } ntb; }; } SVCreateTbReq; @@ -1777,8 +1777,8 @@ int32_t tDeserializeSVCreateTbBatchRsp(void* buf, int32_t bufLen, SVCreateTbBatc // TDMT_VND_DROP_TABLE ================= typedef struct { - const char* name; - int8_t igNotExists; + char* name; + int8_t igNotExists; } SVDropTbReq; typedef struct { @@ -1809,9 +1809,9 @@ int32_t tDecodeSVDropTbBatchRsp(SDecoder* pCoder, SVDropTbBatchRsp* pRsp); // TDMT_VND_ALTER_TABLE ===================== typedef struct { - const char* tbName; - int8_t action; - const char* colName; + char* tbName; + int8_t action; + char* colName; // TSDB_ALTER_TABLE_ADD_COLUMN int8_t type; int8_t flags; @@ -1820,17 +1820,17 @@ typedef struct { // TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES int32_t colModBytes; // TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME - const char* colNewName; + char* colNewName; // TSDB_ALTER_TABLE_UPDATE_TAG_VAL - const char* tagName; - int8_t isNull; - uint32_t nTagVal; - const uint8_t* pTagVal; + char* tagName; + int8_t isNull; + uint32_t nTagVal; + uint8_t* pTagVal; // TSDB_ALTER_TABLE_UPDATE_OPTIONS - int8_t updateTTL; - int32_t newTTL; - int8_t updateComment; - const char* newComment; + int8_t updateTTL; + int32_t newTTL; + int8_t updateComment; + char* newComment; } SVAlterTbReq; int32_t tEncodeSVAlterTbReq(SEncoder* pEncoder, const SVAlterTbReq* pReq); @@ -2020,7 +2020,7 @@ static FORCE_INLINE void tFreeClientHbBatchRsp(void* pRsp) { int32_t tSerializeSClientHbBatchRsp(void* buf, int32_t bufLen, const SClientHbBatchRsp* pBatchRsp); int32_t tDeserializeSClientHbBatchRsp(void* buf, int32_t bufLen, SClientHbBatchRsp* pBatchRsp); -void tFreeSClientHbBatchRsp(SClientHbBatchRsp *pBatchRsp); +void tFreeSClientHbBatchRsp(SClientHbBatchRsp* pBatchRsp); static FORCE_INLINE int32_t tEncodeSKv(SEncoder* pEncoder, const SKv* pKv) { if (tEncodeI32(pEncoder, pKv->key) < 0) return -1; @@ -2255,20 +2255,20 @@ int32_t tSerializeSMDropSmaReq(void* buf, int32_t bufLen, SMDropSmaReq* pReq); int32_t tDeserializeSMDropSmaReq(void* buf, int32_t bufLen, SMDropSmaReq* pReq); typedef struct { - int8_t version; // for compatibility(default 0) - int8_t intervalUnit; // MACRO: TIME_UNIT_XXX - int8_t slidingUnit; // MACRO: TIME_UNIT_XXX - int8_t timezoneInt; // sma data expired if timezone changes. - char indexName[TSDB_INDEX_NAME_LEN]; - int32_t exprLen; - int32_t tagsFilterLen; - int64_t indexUid; - tb_uid_t tableUid; // super/child/common table uid - int64_t interval; - int64_t offset; // use unit by precision of DB - int64_t sliding; - const char* expr; // sma expression - const char* tagsFilter; + int8_t version; // for compatibility(default 0) + int8_t intervalUnit; // MACRO: TIME_UNIT_XXX + int8_t slidingUnit; // MACRO: TIME_UNIT_XXX + int8_t timezoneInt; // sma data expired if timezone changes. 
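// (expr and tagsFilter below lose their const qualifiers, matching the same
// change applied to SVCreateStbReq, SVAlterTbReq and SVSubmitBlk elsewhere in
// this header; presumably so decoded requests own mutable buffers that can be
// freed)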
+ char indexName[TSDB_INDEX_NAME_LEN]; + int32_t exprLen; + int32_t tagsFilterLen; + int64_t indexUid; + tb_uid_t tableUid; // super/child/common table uid + int64_t interval; + int64_t offset; // use unit by precision of DB + int64_t sliding; + char* expr; // sma expression + char* tagsFilter; } STSma; // Time-range-wise SMA typedef STSma SVCreateTSmaReq; @@ -2596,12 +2596,12 @@ static FORCE_INLINE void tDeleteSMqAskEpRsp(SMqAskEpRsp* pRsp) { #define TD_AUTO_CREATE_TABLE 0x1 typedef struct { - int64_t suid; - int64_t uid; - int32_t sver; - uint32_t nData; - const uint8_t* pData; - SVCreateTbReq cTbReq; + int64_t suid; + int64_t uid; + int32_t sver; + uint32_t nData; + uint8_t* pData; + SVCreateTbReq cTbReq; } SVSubmitBlk; typedef struct { diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 455898585aaec2935d72aab0cdf6dfab6a0aac48..51a15c1489cf94d755dfdda386edae8c2ae4a708 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -144,12 +144,14 @@ enum { TD_DEF_MSG_TYPE(TDMT_MND_CREATE_TOPIC, "mnode-create-topic", SMCreateTopicReq, SMCreateTopicRsp) TD_DEF_MSG_TYPE(TDMT_MND_ALTER_TOPIC, "mnode-alter-topic", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_DROP_TOPIC, "mnode-drop-topic", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_DROP_CGROUP, "mnode-drop-cgroup", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_SUBSCRIBE, "mnode-subscribe", SCMSubscribeReq, SCMSubscribeRsp) TD_DEF_MSG_TYPE(TDMT_MND_MQ_ASK_EP, "mnode-mq-ask-ep", SMqAskEpReq, SMqAskEpRsp) TD_DEF_MSG_TYPE(TDMT_MND_MQ_TIMER, "mnode-mq-tmr", SMTimerReq, NULL) TD_DEF_MSG_TYPE(TDMT_MND_MQ_CONSUMER_LOST, "mnode-mq-consumer-lost", SMqConsumerLostMsg, NULL) TD_DEF_MSG_TYPE(TDMT_MND_MQ_CONSUMER_RECOVER, "mnode-mq-consumer-recover", SMqConsumerRecoverMsg, NULL) TD_DEF_MSG_TYPE(TDMT_MND_MQ_DO_REBALANCE, "mnode-mq-do-rebalance", SMqDoRebalanceMsg, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_MQ_DROP_CGROUP, "mnode-mq-drop-cgroup", SMqDropCGroupReq, SMqDropCGroupRsp) TD_DEF_MSG_TYPE(TDMT_MND_MQ_COMMIT_OFFSET, "mnode-mq-commit-offset", SMqCMCommitOffsetReq, SMqCMCommitOffsetRsp) TD_DEF_MSG_TYPE(TDMT_MND_CREATE_STREAM, "mnode-create-stream", SCMCreateStreamReq, SCMCreateStreamRsp) TD_DEF_MSG_TYPE(TDMT_MND_ALTER_STREAM, "mnode-alter-stream", NULL, NULL) @@ -158,6 +160,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_MND_DROP_INDEX, "mnode-drop-index", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_GET_DB_CFG, "mnode-get-db-cfg", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_GET_INDEX, "mnode-get-index", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_APPLY_MSG, "mnode-apply-msg", NULL, NULL) // Requests handled by VNODE TD_NEW_MSG_SEG(TDMT_VND_MSG) @@ -179,8 +182,6 @@ enum { TD_DEF_MSG_TYPE(TDMT_VND_MQ_DISCONNECT, "vnode-mq-disconnect", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_MQ_VG_CHANGE, "vnode-mq-vg-change", SMqRebVgReq, SMqRebVgRsp) TD_DEF_MSG_TYPE(TDMT_VND_MQ_VG_DELETE, "vnode-mq-vg-delete", SMqVDeleteReq, SMqVDeleteRsp) - TD_DEF_MSG_TYPE(TDMT_VND_RES_READY, "vnode-res-ready", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_VND_TASKS_STATUS, "vnode-tasks-status", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_TASK, "vnode-cancel-task", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_DROP_TASK, "vnode-drop-task", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_CREATE_TOPIC, "vnode-create-topic", NULL, NULL) @@ -193,11 +194,8 @@ enum { TD_DEF_MSG_TYPE(TDMT_VND_EXPLAIN, "vnode-explain", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_SUBSCRIBE, "vnode-subscribe", SMVSubscribeReq, SMVSubscribeRsp) - TD_DEF_MSG_TYPE(TDMT_VND_CONSUME, "vnode-consume", SMqCVConsumeReq, SMqCVConsumeRsp) + TD_DEF_MSG_TYPE(TDMT_VND_CONSUME, "vnode-consume", 
SMqPollReq, SMqDataBlkRsp) TD_DEF_MSG_TYPE(TDMT_VND_TASK_DEPLOY, "vnode-task-deploy", SStreamTaskDeployReq, SStreamTaskDeployRsp) - TD_DEF_MSG_TYPE(TDMT_VND_TASK_PIPE_EXEC, "vnode-task-pipe-exec", SStreamTaskExecReq, SStreamTaskExecRsp) - TD_DEF_MSG_TYPE(TDMT_VND_TASK_MERGE_EXEC, "vnode-task-merge-exec", SStreamTaskExecReq, SStreamTaskExecRsp) - TD_DEF_MSG_TYPE(TDMT_VND_TASK_WRITE_EXEC, "vnode-task-write-exec", SStreamTaskExecReq, SStreamTaskExecRsp) TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_TASK_RUN, "vnode-stream-task-run", NULL, NULL) @@ -234,9 +232,13 @@ enum { // Requests handled by SNODE TD_NEW_MSG_SEG(TDMT_SND_MSG) TD_DEF_MSG_TYPE(TDMT_SND_TASK_DEPLOY, "snode-task-deploy", SStreamTaskDeployReq, SStreamTaskDeployRsp) - TD_DEF_MSG_TYPE(TDMT_SND_TASK_EXEC, "snode-task-exec", SStreamTaskExecReq, SStreamTaskExecRsp) - TD_DEF_MSG_TYPE(TDMT_SND_TASK_PIPE_EXEC, "snode-task-pipe-exec", SStreamTaskExecReq, SStreamTaskExecRsp) - TD_DEF_MSG_TYPE(TDMT_SND_TASK_MERGE_EXEC, "snode-task-merge-exec", SStreamTaskExecReq, SStreamTaskExecRsp) + //TD_DEF_MSG_TYPE(TDMT_SND_TASK_EXEC, "snode-task-exec", SStreamTaskExecReq, SStreamTaskExecRsp) + //TD_DEF_MSG_TYPE(TDMT_SND_TASK_PIPE_EXEC, "snode-task-pipe-exec", SStreamTaskExecReq, SStreamTaskExecRsp) + //TD_DEF_MSG_TYPE(TDMT_SND_TASK_MERGE_EXEC, "snode-task-merge-exec", SStreamTaskExecReq, SStreamTaskExecRsp) + + TD_DEF_MSG_TYPE(TDMT_SND_TASK_RUN, "snode-stream-task-run", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_SND_TASK_DISPATCH, "snode-stream-task-dispatch", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_SND_TASK_RECOVER, "snode-stream-task-recover", NULL, NULL) // Requests handled by SCHEDULER TD_NEW_MSG_SEG(TDMT_SCH_MSG) diff --git a/include/common/ttokendef.h b/include/common/ttokendef.h index 68199fa51997d657b9de180ce6773c759d51c5a9..2fc524eeac39eefba6ce87c39d7bf4746fd83de1 100644 --- a/include/common/ttokendef.h +++ b/include/common/ttokendef.h @@ -93,166 +93,168 @@ #define TK_VGROUPS 75 #define TK_SINGLE_STABLE 76 #define TK_RETENTIONS 77 -#define TK_NK_COLON 78 -#define TK_TABLE 79 -#define TK_NK_LP 80 -#define TK_NK_RP 81 -#define TK_STABLE 82 -#define TK_ADD 83 -#define TK_COLUMN 84 -#define TK_MODIFY 85 -#define TK_RENAME 86 -#define TK_TAG 87 -#define TK_SET 88 -#define TK_NK_EQ 89 -#define TK_USING 90 -#define TK_TAGS 91 -#define TK_COMMENT 92 -#define TK_BOOL 93 -#define TK_TINYINT 94 -#define TK_SMALLINT 95 -#define TK_INT 96 -#define TK_INTEGER 97 -#define TK_BIGINT 98 -#define TK_FLOAT 99 -#define TK_DOUBLE 100 -#define TK_BINARY 101 -#define TK_TIMESTAMP 102 -#define TK_NCHAR 103 -#define TK_UNSIGNED 104 -#define TK_JSON 105 -#define TK_VARCHAR 106 -#define TK_MEDIUMBLOB 107 -#define TK_BLOB 108 -#define TK_VARBINARY 109 -#define TK_DECIMAL 110 -#define TK_DELAY 111 -#define TK_FILE_FACTOR 112 -#define TK_NK_FLOAT 113 -#define TK_ROLLUP 114 -#define TK_TTL 115 -#define TK_SMA 116 -#define TK_SHOW 117 -#define TK_DATABASES 118 -#define TK_TABLES 119 -#define TK_STABLES 120 -#define TK_MNODES 121 -#define TK_MODULES 122 -#define TK_QNODES 123 -#define TK_FUNCTIONS 124 -#define TK_INDEXES 125 -#define TK_ACCOUNTS 126 -#define TK_APPS 127 -#define TK_CONNECTIONS 128 -#define TK_LICENCE 129 -#define TK_GRANTS 130 -#define TK_QUERIES 131 -#define TK_SCORES 132 -#define TK_TOPICS 133 -#define TK_VARIABLES 134 -#define TK_BNODES 135 -#define TK_SNODES 136 -#define TK_CLUSTER 137 -#define TK_TRANSACTIONS 138 -#define TK_LIKE 139 -#define TK_INDEX 140 -#define TK_FULLTEXT 141 -#define TK_FUNCTION 142 
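// (this deleted run of token values, 78 through 237, reappears renumbered in
// the replacement block that follows: TK_SCHEMALESS is inserted at 78, shifting
// every later token up by one, and TK_CGROUP at 147 shifts the remainder up by
// two, ending at TK_FILE 239)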
-#define TK_INTERVAL 143 -#define TK_TOPIC 144 -#define TK_AS 145 -#define TK_WITH 146 -#define TK_SCHEMA 147 -#define TK_DESC 148 -#define TK_DESCRIBE 149 -#define TK_RESET 150 -#define TK_QUERY 151 -#define TK_CACHE 152 -#define TK_EXPLAIN 153 -#define TK_ANALYZE 154 -#define TK_VERBOSE 155 -#define TK_NK_BOOL 156 -#define TK_RATIO 157 -#define TK_COMPACT 158 -#define TK_VNODES 159 -#define TK_IN 160 -#define TK_OUTPUTTYPE 161 -#define TK_AGGREGATE 162 -#define TK_BUFSIZE 163 -#define TK_STREAM 164 -#define TK_INTO 165 -#define TK_TRIGGER 166 -#define TK_AT_ONCE 167 -#define TK_WINDOW_CLOSE 168 -#define TK_WATERMARK 169 -#define TK_KILL 170 -#define TK_CONNECTION 171 -#define TK_TRANSACTION 172 -#define TK_MERGE 173 -#define TK_VGROUP 174 -#define TK_REDISTRIBUTE 175 -#define TK_SPLIT 176 -#define TK_SYNCDB 177 -#define TK_NULL 178 -#define TK_NK_QUESTION 179 -#define TK_NK_ARROW 180 -#define TK_ROWTS 181 -#define TK_TBNAME 182 -#define TK_QSTARTTS 183 -#define TK_QENDTS 184 -#define TK_WSTARTTS 185 -#define TK_WENDTS 186 -#define TK_WDURATION 187 -#define TK_CAST 188 -#define TK_NOW 189 -#define TK_TODAY 190 -#define TK_TIMEZONE 191 -#define TK_COUNT 192 -#define TK_FIRST 193 -#define TK_LAST 194 -#define TK_LAST_ROW 195 -#define TK_BETWEEN 196 -#define TK_IS 197 -#define TK_NK_LT 198 -#define TK_NK_GT 199 -#define TK_NK_LE 200 -#define TK_NK_GE 201 -#define TK_NK_NE 202 -#define TK_MATCH 203 -#define TK_NMATCH 204 -#define TK_CONTAINS 205 -#define TK_JOIN 206 -#define TK_INNER 207 -#define TK_SELECT 208 -#define TK_DISTINCT 209 -#define TK_WHERE 210 -#define TK_PARTITION 211 -#define TK_BY 212 -#define TK_SESSION 213 -#define TK_STATE_WINDOW 214 -#define TK_SLIDING 215 -#define TK_FILL 216 -#define TK_VALUE 217 -#define TK_NONE 218 -#define TK_PREV 219 -#define TK_LINEAR 220 -#define TK_NEXT 221 -#define TK_GROUP 222 -#define TK_HAVING 223 -#define TK_ORDER 224 -#define TK_SLIMIT 225 -#define TK_SOFFSET 226 -#define TK_LIMIT 227 -#define TK_OFFSET 228 -#define TK_ASC 229 -#define TK_NULLS 230 -#define TK_ID 231 -#define TK_NK_BITNOT 232 -#define TK_INSERT 233 -#define TK_VALUES 234 -#define TK_IMPORT 235 -#define TK_NK_SEMI 236 -#define TK_FILE 237 +#define TK_SCHEMALESS 78 +#define TK_NK_COLON 79 +#define TK_TABLE 80 +#define TK_NK_LP 81 +#define TK_NK_RP 82 +#define TK_STABLE 83 +#define TK_ADD 84 +#define TK_COLUMN 85 +#define TK_MODIFY 86 +#define TK_RENAME 87 +#define TK_TAG 88 +#define TK_SET 89 +#define TK_NK_EQ 90 +#define TK_USING 91 +#define TK_TAGS 92 +#define TK_COMMENT 93 +#define TK_BOOL 94 +#define TK_TINYINT 95 +#define TK_SMALLINT 96 +#define TK_INT 97 +#define TK_INTEGER 98 +#define TK_BIGINT 99 +#define TK_FLOAT 100 +#define TK_DOUBLE 101 +#define TK_BINARY 102 +#define TK_TIMESTAMP 103 +#define TK_NCHAR 104 +#define TK_UNSIGNED 105 +#define TK_JSON 106 +#define TK_VARCHAR 107 +#define TK_MEDIUMBLOB 108 +#define TK_BLOB 109 +#define TK_VARBINARY 110 +#define TK_DECIMAL 111 +#define TK_DELAY 112 +#define TK_FILE_FACTOR 113 +#define TK_NK_FLOAT 114 +#define TK_ROLLUP 115 +#define TK_TTL 116 +#define TK_SMA 117 +#define TK_SHOW 118 +#define TK_DATABASES 119 +#define TK_TABLES 120 +#define TK_STABLES 121 +#define TK_MNODES 122 +#define TK_MODULES 123 +#define TK_QNODES 124 +#define TK_FUNCTIONS 125 +#define TK_INDEXES 126 +#define TK_ACCOUNTS 127 +#define TK_APPS 128 +#define TK_CONNECTIONS 129 +#define TK_LICENCE 130 +#define TK_GRANTS 131 +#define TK_QUERIES 132 +#define TK_SCORES 133 +#define TK_TOPICS 134 +#define TK_VARIABLES 135 +#define TK_BNODES 136 +#define 
TK_SNODES 137 +#define TK_CLUSTER 138 +#define TK_TRANSACTIONS 139 +#define TK_LIKE 140 +#define TK_INDEX 141 +#define TK_FULLTEXT 142 +#define TK_FUNCTION 143 +#define TK_INTERVAL 144 +#define TK_TOPIC 145 +#define TK_AS 146 +#define TK_CGROUP 147 +#define TK_WITH 148 +#define TK_SCHEMA 149 +#define TK_DESC 150 +#define TK_DESCRIBE 151 +#define TK_RESET 152 +#define TK_QUERY 153 +#define TK_CACHE 154 +#define TK_EXPLAIN 155 +#define TK_ANALYZE 156 +#define TK_VERBOSE 157 +#define TK_NK_BOOL 158 +#define TK_RATIO 159 +#define TK_COMPACT 160 +#define TK_VNODES 161 +#define TK_IN 162 +#define TK_OUTPUTTYPE 163 +#define TK_AGGREGATE 164 +#define TK_BUFSIZE 165 +#define TK_STREAM 166 +#define TK_INTO 167 +#define TK_TRIGGER 168 +#define TK_AT_ONCE 169 +#define TK_WINDOW_CLOSE 170 +#define TK_WATERMARK 171 +#define TK_KILL 172 +#define TK_CONNECTION 173 +#define TK_TRANSACTION 174 +#define TK_MERGE 175 +#define TK_VGROUP 176 +#define TK_REDISTRIBUTE 177 +#define TK_SPLIT 178 +#define TK_SYNCDB 179 +#define TK_NULL 180 +#define TK_NK_QUESTION 181 +#define TK_NK_ARROW 182 +#define TK_ROWTS 183 +#define TK_TBNAME 184 +#define TK_QSTARTTS 185 +#define TK_QENDTS 186 +#define TK_WSTARTTS 187 +#define TK_WENDTS 188 +#define TK_WDURATION 189 +#define TK_CAST 190 +#define TK_NOW 191 +#define TK_TODAY 192 +#define TK_TIMEZONE 193 +#define TK_COUNT 194 +#define TK_FIRST 195 +#define TK_LAST 196 +#define TK_LAST_ROW 197 +#define TK_BETWEEN 198 +#define TK_IS 199 +#define TK_NK_LT 200 +#define TK_NK_GT 201 +#define TK_NK_LE 202 +#define TK_NK_GE 203 +#define TK_NK_NE 204 +#define TK_MATCH 205 +#define TK_NMATCH 206 +#define TK_CONTAINS 207 +#define TK_JOIN 208 +#define TK_INNER 209 +#define TK_SELECT 210 +#define TK_DISTINCT 211 +#define TK_WHERE 212 +#define TK_PARTITION 213 +#define TK_BY 214 +#define TK_SESSION 215 +#define TK_STATE_WINDOW 216 +#define TK_SLIDING 217 +#define TK_FILL 218 +#define TK_VALUE 219 +#define TK_NONE 220 +#define TK_PREV 221 +#define TK_LINEAR 222 +#define TK_NEXT 223 +#define TK_GROUP 224 +#define TK_HAVING 225 +#define TK_ORDER 226 +#define TK_SLIMIT 227 +#define TK_SOFFSET 228 +#define TK_LIMIT 229 +#define TK_OFFSET 230 +#define TK_ASC 231 +#define TK_NULLS 232 +#define TK_ID 233 +#define TK_NK_BITNOT 234 +#define TK_INSERT 235 +#define TK_VALUES 236 +#define TK_IMPORT 237 +#define TK_NK_SEMI 238 +#define TK_FILE 239 #define TK_NK_SPACE 300 #define TK_NK_COMMENT 301 diff --git a/include/common/ttypes.h b/include/common/ttypes.h index 14428bfc432d4d74baad48bdce0832c2f138df6e..31cdb28690caeb6610d4b5e4ec6307952a0760aa 100644 --- a/include/common/ttypes.h +++ b/include/common/ttypes.h @@ -30,7 +30,7 @@ typedef uint64_t TDRowVerT; typedef int16_t col_id_t; typedef int8_t col_type_t; typedef int32_t col_bytes_t; -typedef uint16_t schema_ver_t; +typedef int32_t schema_ver_t; typedef int32_t func_id_t; #pragma pack(push, 1) diff --git a/include/dnode/mnode/mnode.h b/include/dnode/mnode/mnode.h index 28c470a4437d46cdaf213d857e51c6e67108d1d4..ab090940f218abe745fff2bfea170c9b6abf9248 100644 --- a/include/dnode/mnode/mnode.h +++ b/include/dnode/mnode/mnode.h @@ -29,6 +29,8 @@ extern "C" { typedef struct SMnode SMnode; typedef struct { + int32_t dnodeId; + bool standby; bool deploy; int8_t replica; int8_t selfIndex; @@ -53,15 +55,6 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption); */ void mndClose(SMnode *pMnode); -/** - * @brief Close a mnode. - * - * @param pMnode The mnode object to close. - * @param pOption Options of the mnode. 
- * @return int32_t 0 for success, -1 for failure. - */ -int32_t mndAlter(SMnode *pMnode, const SMnodeOpt *pOption); - /** * @brief Start mnode * @@ -88,7 +81,8 @@ int32_t mndGetLoad(SMnode *pMnode, SMnodeLoad *pLoad); * @param pMsg The request msg. * @return int32_t 0 for success, -1 for failure. */ -int32_t mndProcessMsg(SRpcMsg *pMsg); +int32_t mndProcessRpcMsg(SRpcMsg *pMsg); +int32_t mndProcessSyncMsg(SRpcMsg *pMsg); /** * @brief Generate machine code diff --git a/include/dnode/qnode/qnode.h b/include/dnode/qnode/qnode.h index 1ab101f705ac3f71fad134c200a22f903e4a8e86..90a952939577fc9cd945d0dc9fd8bde8d906667f 100644 --- a/include/dnode/qnode/qnode.h +++ b/include/dnode/qnode/qnode.h @@ -26,14 +26,17 @@ extern "C" { typedef struct SQnode SQnode; typedef struct { - int64_t numOfStartTask; - int64_t numOfStopTask; - int64_t numOfRecvedFetch; - int64_t numOfSentHb; - int64_t numOfSentFetch; - int64_t numOfTaskInQueue; + int64_t numOfProcessedQuery; + int64_t numOfProcessedCQuery; + int64_t numOfProcessedFetch; + int64_t numOfProcessedDrop; + int64_t memSizeInCache; + int64_t dataSizeSend; + int64_t dataSizeRecv; + int64_t numOfQueryInQueue; int64_t numOfFetchInQueue; - int64_t numOfErrors; + int64_t waitTimeInQueryQueue; + int64_t waitTimeInFetchQueue; } SQnodeLoad; typedef struct { @@ -71,10 +74,10 @@ int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad); * @param pQnode The qnode object. * @param pMsg The request message */ -int32_t qndProcessQueryMsg(SQnode *pQnode, SRpcMsg *pMsg); +int32_t qndProcessQueryMsg(SQnode *pQnode, int64_t ts, SRpcMsg *pMsg); #ifdef __cplusplus } #endif -#endif /*_TD_QNODE_H_*/ \ No newline at end of file +#endif /*_TD_QNODE_H_*/ diff --git a/include/libs/catalog/catalog.h b/include/libs/catalog/catalog.h index f9d8fc0de19fccd48a7d3e115f7e33e3a328ef03..8027b9394e0fd42c4c1d20a051868495130642f5 100644 --- a/include/libs/catalog/catalog.h +++ b/include/libs/catalog/catalog.h @@ -46,24 +46,42 @@ typedef enum { AUTH_TYPE_OTHER, } AUTH_TYPE; +typedef struct SUserAuthInfo { + char user[TSDB_USER_LEN]; + char dbFName[TSDB_DB_FNAME_LEN]; + AUTH_TYPE type; +} SUserAuthInfo; + +typedef struct SDbInfo { + int32_t vgVer; + int32_t tbNum; + int64_t dbId; +} SDbInfo; + typedef struct SCatalogReq { - SArray *pTableName; // element is SNAME - SArray *pUdf; // udf name + SArray *pDbVgroup; // element is db full name + SArray *pDbCfg; // element is db full name + SArray *pDbInfo; // element is db full name + SArray *pTableMeta; // element is SNAME + SArray *pTableHash; // element is SNAME + SArray *pUdf; // element is udf name + SArray *pIndex; // element is index name + SArray *pUser; // element is SUserAuthInfo bool qNodeRequired; // valid qnode } SCatalogReq; typedef struct SMetaData { - SArray *pTableMeta; // STableMeta array - SArray *pVgroupInfo; // SVgroupInfo list - SArray *pUdfList; // udf info list - SArray *pQnodeList; // qnode list, SArray + SArray *pDbVgroup; // SArray*> + SArray *pDbCfg; // SArray + SArray *pDbInfo; // SArray + SArray *pTableMeta; // SArray + SArray *pTableHash; // SArray + SArray *pUdfList; // SArray + SArray *pIndex; // SArray + SArray *pUser; // SArray + SArray *pQnodeList; // SArray } SMetaData; -typedef struct STbSVersion { - char* tbFName; - int32_t sver; -} STbSVersion; - typedef struct SCatalogCfg { uint32_t maxTblCacheNum; uint32_t maxDBCacheNum; @@ -88,6 +106,12 @@ typedef struct SDbVgVersion { int32_t numOfTable; // unit is TSDB_TABLE_NUM_UNIT } SDbVgVersion; +typedef struct STbSVersion { + char* tbFName; + int32_t sver; + int32_t tver;
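// tver carries the tag-schema version alongside the column-schema version
// (sver), so cached table metadata can be invalidated when either one changes;
// the server reports both back through the sversion/tversion fields added to
// SQueryTableRsp in tmsg.h above.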
+} STbSVersion; + typedef struct SUserAuthVersion { char user[TSDB_USER_LEN]; int32_t version; @@ -96,6 +120,8 @@ typedef struct SUserAuthVersion { typedef SDbCfgRsp SDbCfgInfo; typedef SUserIndexRsp SIndexInfo; +typedef void (*catalogCallback)(SMetaData* pResult, void* param, int32_t code); + int32_t catalogInit(SCatalogCfg *cfg); /** @@ -131,7 +157,7 @@ int32_t catalogUpdateDBVgInfo(SCatalog* pCatalog, const char* dbName, uint64_t d int32_t catalogRemoveDB(SCatalog* pCatalog, const char* dbName, uint64_t dbId); -int32_t catalogRemoveTableMeta(SCatalog* pCtg, const SName* pTableName); +int32_t catalogRemoveTableMeta(SCatalog* pCtg, SName* pTableName); int32_t catalogRemoveStbMeta(SCatalog* pCtg, const char* dbFName, uint64_t dbId, const char* stbName, uint64_t suid); @@ -231,6 +257,8 @@ int32_t catalogGetTableHashVgroup(SCatalog* pCatalog, void * pTransporter, const */ int32_t catalogGetAllMeta(SCatalog* pCatalog, void *pTransporter, const SEpSet* pMgmtEps, const SCatalogReq* pReq, SMetaData* pRsp); +int32_t catalogAsyncGetAllMeta(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, uint64_t reqId, const SCatalogReq* pReq, catalogCallback fp, void* param, int64_t* jobId); + int32_t catalogGetQnodeList(SCatalog* pCatalog, void *pTransporter, const SEpSet* pMgmtEps, SArray* pQnodeList); int32_t catalogGetExpiredSTables(SCatalog* pCatalog, SSTableMetaVersion **stables, uint32_t *num); @@ -241,14 +269,19 @@ int32_t catalogGetExpiredUsers(SCatalog* pCtg, SUserAuthVersion **users, uint32_ int32_t catalogGetDBCfg(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, const char* dbFName, SDbCfgInfo* pDbCfg); -int32_t catalogGetIndexInfo(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, const char* indexName, SIndexInfo* pInfo); +int32_t catalogGetIndexMeta(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, const char* indexName, SIndexInfo* pInfo); -int32_t catalogGetUdfInfo(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, const char* funcName, SFuncInfo** pInfo); +int32_t catalogGetUdfInfo(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, const char* funcName, SFuncInfo* pInfo); int32_t catalogChkAuth(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, const char* user, const char* dbFName, AUTH_TYPE type, bool *pass); int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth); +int32_t catalogUpdateVgEpSet(SCatalog* pCtg, const char* dbFName, int32_t vgId, SEpSet *epSet); + + +int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, uint64_t reqId); + /** * Destroy catalog and relase all resources diff --git a/include/libs/command/command.h b/include/libs/command/command.h index 0cd566ee464dc23d0af5288281448c98204e6a2e..aee6b837837d7b3d9e3cbf37cde21c7a626c1a4f 100644 --- a/include/libs/command/command.h +++ b/include/libs/command/command.h @@ -24,7 +24,7 @@ int32_t qExecCommand(SNode* pStmt, SRetrieveTableRsp** pRsp); int32_t qExecStaticExplain(SQueryPlan *pDag, SRetrieveTableRsp **pRsp); int32_t qExecExplainBegin(SQueryPlan *pDag, SExplainCtx **pCtx, int64_t startTs); int32_t qExecExplainEnd(SExplainCtx *pCtx, SRetrieveTableRsp **pRsp); -int32_t qExplainUpdateExecInfo(SExplainCtx *pCtx, SExplainRsp *pRspMsg, int32_t groupId, SRetrieveTableRsp **pRsp); +int32_t qExplainUpdateExecInfo(SExplainCtx *pCtx, SExplainRsp *pRspMsg, int32_t groupId, SRetrieveTableRsp **pRsp); void qExplainFreeCtx(SExplainCtx *pCtx); diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index 
9cafb4ee04543f1978f68c982a5208fcde2c25a4..288248422b8288b98d8f0fccaef040186294cb76 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -61,7 +61,7 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, void* streamReadHandle); * @param type * @return */ -int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type); +int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type, bool assignUid); /** * Set multiple input data blocks for the stream scan. @@ -71,7 +71,7 @@ int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type); * @param type * @return */ -int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type); +int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type, bool assignUid); /** * Update the table id list, add or remove. @@ -156,18 +156,6 @@ int64_t qGetQueriedTableUid(qTaskInfo_t tinfo); */ int32_t qGetQualifiedTableIdList(void* pTableList, const char* tagCond, int32_t tagCondLen, SArray* pTableIdList); -/** - * Create the table group according to the group by tags info - * @param pTableIdList - * @param skey - * @param groupInfo - * @param groupByIndex - * @param numOfIndex - * @return - */ -// int32_t qCreateTableGroupByGroupExpr(SArray* pTableIdList, TSKEY skey, STableGroupInfo groupInfo, SColIndex* -// groupByIndex, int32_t numOfIndex); - /** * Update the table id list of a given query. * @param uid child table uid diff --git a/include/libs/function/function.h b/include/libs/function/function.h index 7d3e969c4119cc2e4eaf140188e0f85ee62bcc6e..e8cb363e08fa65385d36762face331f5de5cf1eb 100644 --- a/include/libs/function/function.h +++ b/include/libs/function/function.h @@ -39,6 +39,7 @@ typedef bool (*FExecInit)(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInf typedef int32_t (*FExecProcess)(struct SqlFunctionCtx *pCtx); typedef int32_t (*FExecFinalize)(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock); typedef int32_t (*FScalarExecProcess)(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); +typedef int32_t (*FExecCombine)(struct SqlFunctionCtx *pDestCtx, struct SqlFunctionCtx *pSourceCtx); typedef struct SScalarFuncExecFuncs { FExecGetEnv getEnv; @@ -50,6 +51,7 @@ typedef struct SFuncExecFuncs { FExecInit init; FExecProcess process; FExecFinalize finalize; + FExecCombine combine; } SFuncExecFuncs; typedef struct SFileBlockInfo { @@ -59,56 +61,9 @@ typedef struct SFileBlockInfo { #define TSDB_BLOCK_DIST_STEP_ROWS 8 #define MAX_INTERVAL_TIME_WINDOW 1000000 // maximum allowed time windows in final results -#define FUNCTION_TYPE_SCALAR 1 -#define FUNCTION_TYPE_AGG 2 - #define TOP_BOTTOM_QUERY_LIMIT 100 #define FUNCTIONS_NAME_MAX_LENGTH 16 -#define FUNCTION_INVALID_ID -1 -#define FUNCTION_COUNT 0 -#define FUNCTION_SUM 1 -#define FUNCTION_AVG 2 -#define FUNCTION_MIN 3 -#define FUNCTION_MAX 4 -#define FUNCTION_STDDEV 5 -#define FUNCTION_PERCT 6 -#define FUNCTION_APERCT 7 -#define FUNCTION_FIRST 8 -#define FUNCTION_LAST 9 -#define FUNCTION_LAST_ROW 10 -#define FUNCTION_TOP 11 -#define FUNCTION_BOTTOM 12 -#define FUNCTION_SPREAD 13 -#define FUNCTION_TWA 14 -#define FUNCTION_LEASTSQR 15 - -#define FUNCTION_TS 16 -#define FUNCTION_TS_DUMMY 17 -#define FUNCTION_TAG_DUMMY 18 -#define FUNCTION_TS_COMP 19 - -#define FUNCTION_TAG 20 -#define FUNCTION_PRJ 21 - -#define FUNCTION_TAGPRJ 22 -#define FUNCTION_ARITHM 23 -#define FUNCTION_DIFF 24 - -#define FUNCTION_FIRST_DST 25 -#define FUNCTION_LAST_DST 26 
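// (these legacy numeric FUNCTION_* ids are retired in favor of the
// EFunctionType enum in functionMgt.h; user-defined functions map to the fixed
// ids FUNC_AGGREGATE_UDF_ID (5001) and FUNC_SCALAR_UDF_ID (5002) defined there)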
-#define FUNCTION_STDDEV_DST 27 -#define FUNCTION_INTERP 28 - -#define FUNCTION_RATE 29 -#define FUNCTION_IRATE 30 -#define FUNCTION_TID_TAG 31 -#define FUNCTION_DERIVATIVE 32 -#define FUNCTION_BLKINFO 33 - - -#define FUNCTION_COV 38 - typedef struct SResultRowEntryInfo { bool initialized:1; // output buffer has been initialized bool complete:1; // query has completed @@ -178,10 +133,9 @@ typedef struct SqlFunctionCtx { char *pOutput; // final result output buffer, point to sdata->data int32_t numOfParams; SFunctParam *param; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param - int64_t *ptsList; // corresponding timestamp array list + int64_t *ptsList; // corresponding timestamp array list, todo remove it SColumnInfoData *pTsOutput; // corresponding output buffer for timestamp of each result, e.g., top/bottom*/ int32_t offset; - SVariant tag; struct SResultRowEntryInfo *resultInfo; SSubsidiaryResInfo subsidiaries; SPoint1 start; @@ -208,9 +162,6 @@ enum { typedef struct tExprNode { int32_t nodeType; union { - SSchema *pSchema;// column node - struct SVariant *pVal; // value node - struct {// function node char functionName[FUNCTIONS_NAME_MAX_LENGTH]; // todo refactor int32_t functionId; @@ -253,47 +204,23 @@ struct SScalarParam { int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, SResultDataInfo* pInfo, int16_t extLength, bool isSuperTable); -bool qIsValidUdf(SArray* pUdfInfo, const char* name, int32_t len, int32_t* functionId); - void resetResultRowEntryResult(SqlFunctionCtx* pCtx, int32_t num); void cleanupResultRowEntry(struct SResultRowEntryInfo* pCell); int32_t getNumOfResult(SqlFunctionCtx* pCtx, int32_t num, SSDataBlock* pResBlock); bool isRowEntryCompleted(struct SResultRowEntryInfo* pEntry); bool isRowEntryInitialized(struct SResultRowEntryInfo* pEntry); -/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// fill api -struct SFillInfo; -struct SFillColInfo; - typedef struct SPoint { int64_t key; void * val; } SPoint; -//void taosFillSetStartInfo(struct SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey); -//void taosResetFillInfo(struct SFillInfo* pFillInfo, TSKEY startTimestamp); -//void taosFillSetInputDataBlock(struct SFillInfo* pFillInfo, const struct SSDataBlock* pInput); -//struct SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfOutput, const SValueNode* val); -//bool taosFillHasMoreResults(struct SFillInfo* pFillInfo); -// -//struct SFillInfo* taosCreateFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols, -// SInterval* pInterval, int32_t fillType, -// struct SFillColInfo* pCol, const char* id); -// -//void* taosDestroyFillInfo(struct SFillInfo *pFillInfo); -//int64_t taosFillResultDataBlock(struct SFillInfo* pFillInfo, void** output, int32_t capacity); -//int64_t getFillInfoStart(struct SFillInfo *pFillInfo); - int32_t taosGetLinearInterpolationVal(SPoint* point, int32_t outputType, SPoint* point1, SPoint* point2, int32_t inputType); /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // udf api struct SUdfInfo; -void qAddUdfInfo(uint64_t id, struct SUdfInfo* pUdfInfo); -void qRemoveUdfInfo(uint64_t id, struct SUdfInfo* pUdfInfo); - /** * create udfd proxy, called once in process that call doSetupUdf/callUdfxxx/doTeardownUdf * @return error code diff --git a/include/libs/function/functionMgt.h 
b/include/libs/function/functionMgt.h index 89fbc92992bf783b1d8896fbf636f2468b6fa4c6..922136b590cb007c6acd040c7ce81d135c0dad4f 100644 --- a/include/libs/function/functionMgt.h +++ b/include/libs/function/functionMgt.h @@ -23,6 +23,9 @@ extern "C" { #include "function.h" #include "querynodes.h" +#define FUNC_AGGREGATE_UDF_ID 5001 +#define FUNC_SCALAR_UDF_ID 5002 + typedef enum EFunctionType { // aggregate function FUNCTION_TYPE_APERCENTILE = 1, @@ -126,27 +129,19 @@ typedef enum EFunctionType { struct SqlFunctionCtx; struct SResultRowEntryInfo; struct STimeWindow; -struct SCatalog; - -typedef struct SFmGetFuncInfoParam { - struct SCatalog* pCtg; - void* pRpc; - const SEpSet* pMgmtEps; - char* pErrBuf; - int32_t errBufLen; -} SFmGetFuncInfoParam; int32_t fmFuncMgtInit(); void fmFuncMgtDestroy(); -int32_t fmGetFuncInfo(SFmGetFuncInfoParam* pParam, SFunctionNode* pFunc); +int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen); bool fmIsBuiltinFunc(const char* pFunc); bool fmIsAggFunc(int32_t funcId); bool fmIsScalarFunc(int32_t funcId); -bool fmIsNonstandardSQLFunc(int32_t funcId); +bool fmIsVectorFunc(int32_t funcId); +bool fmIsIndefiniteRowsFunc(int32_t funcId); bool fmIsStringFunc(int32_t funcId); bool fmIsDatetimeFunc(int32_t funcId); bool fmIsSelectFunc(int32_t funcId); diff --git a/include/libs/index/index.h b/include/libs/index/index.h index fa4cb1d2bdae0c24b2293f0b376d30f1d4175fc7..c3d31ffe3853d76d6ab6803dfc10f54dad2445c6 100644 --- a/include/libs/index/index.h +++ b/include/libs/index/index.h @@ -16,9 +16,11 @@ #ifndef _TD_INDEX_H_ #define _TD_INDEX_H_ +#include "nodes.h" #include "os.h" #include "taoserror.h" #include "tarray.h" +#include "tglobal.h" #ifdef __cplusplus extern "C" { @@ -189,6 +191,17 @@ void indexTermDestroy(SIndexTerm* p); */ void indexInit(); +/* index filter */ +typedef struct SIndexMetaArg { + void* metaHandle; + uint64_t suid; +} SIndexMetaArg; + +typedef enum { SFLT_NOT_INDEX, SFLT_COARSE_INDEX, SFLT_ACCURATE_INDEX } SIdxFltStatus; + +SIdxFltStatus idxGetFltStatus(SNode* pFilterNode); + +int32_t doFilterTag(const SNode* pFilterNode, SIndexMetaArg* metaArg, SArray* result); /* * destory index env * diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h index 82bf4e1f45a0cab5c7f1b61d04e08d137148e44d..7bd3a40c7199f204bd14e5af3231e59d5b7383be 100644 --- a/include/libs/nodes/cmdnodes.h +++ b/include/libs/nodes/cmdnodes.h @@ -50,6 +50,7 @@ typedef struct SDatabaseOptions { int32_t numOfVgroups; int8_t singleStable; SNodeList* pRetentions; + int8_t schemaless; } SDatabaseOptions; typedef struct SCreateDatabaseStmt { @@ -260,6 +261,13 @@ typedef struct SDropTopicStmt { bool ignoreNotExists; } SDropTopicStmt; +typedef struct SDropCGroupStmt { + ENodeType type; + char topicName[TSDB_TABLE_NAME_LEN]; + char cgroup[TSDB_CGROUP_LEN]; + bool ignoreNotExists; +} SDropCGroupStmt; + typedef struct SAlterLocalStmt { ENodeType type; char config[TSDB_DNODE_CONFIG_LEN]; diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h index 291e08fdbf2ba28a6a5ea1c0d71d64f6e00a6029..38602667252e429eb9840c75d2c23b98139df184 100644 --- a/include/libs/nodes/nodes.h +++ b/include/libs/nodes/nodes.h @@ -59,10 +59,10 @@ extern "C" { for (SListCell* cell = (NULL != (list) ? (list)->pHead : NULL); \ (NULL != cell ? 
(node = &(cell->pNode), true) : (node = NULL, false)); cell = cell->pNext) -#define DESTORY_LIST(list) \ - do { \ - nodesDestroyList(list); \ - list = NULL; \ +#define DESTORY_LIST(list) \ + do { \ + nodesDestroyList((list)); \ + (list) = NULL; \ } while (0) typedef enum ENodeType { @@ -96,6 +96,7 @@ typedef enum ENodeType { QUERY_NODE_EXPLAIN_OPTIONS, QUERY_NODE_STREAM_OPTIONS, QUERY_NODE_TOPIC_OPTIONS, + QUERY_NODE_LEFT_VALUE, // Statement nodes are used in parser and planner module. QUERY_NODE_SET_OPERATOR, @@ -130,6 +131,7 @@ typedef enum ENodeType { QUERY_NODE_DROP_MNODE_STMT, QUERY_NODE_CREATE_TOPIC_STMT, QUERY_NODE_DROP_TOPIC_STMT, + QUERY_NODE_DROP_CGROUP_STMT, QUERY_NODE_ALTER_LOCAL_STMT, QUERY_NODE_EXPLAIN_STMT, QUERY_NODE_DESCRIBE_STMT, @@ -211,6 +213,8 @@ typedef enum ENodeType { QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL, QUERY_NODE_PHYSICAL_PLAN_FILL, QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW, + QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW, + QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION_WINDOW, QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW, QUERY_NODE_PHYSICAL_PLAN_PARTITION, QUERY_NODE_PHYSICAL_PLAN_DISPATCH, @@ -241,7 +245,6 @@ typedef struct SNodeList { #define SNodeptr void* -int32_t nodesNodeSize(ENodeType type); SNodeptr nodesMakeNode(ENodeType type); void nodesDestroyNode(SNodeptr pNode); diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 7ca4ca917297e5042ae87c9aaba244a881e6f0a3..2648a468dd3fa82fe91825d60b739387d9255bd7 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -31,6 +31,7 @@ typedef struct SLogicNode { SNodeList* pChildren; struct SLogicNode* pParent; int32_t optimizedFlag; + uint8_t precision; } SLogicNode; typedef enum EScanType { SCAN_TYPE_TAG = 1, SCAN_TYPE_TABLE, SCAN_TYPE_SYSTEM_TABLE, SCAN_TYPE_STREAM } EScanType; @@ -54,12 +55,17 @@ typedef struct SScanLogicNode { int64_t sliding; int8_t intervalUnit; int8_t slidingUnit; + SNode* pTagCond; + int8_t triggerType; + int64_t watermark; + int16_t tsColId; } SScanLogicNode; typedef struct SJoinLogicNode { SLogicNode node; EJoinType joinType; SNode* pOnConditions; + bool isSingleTableJoin; } SJoinLogicNode; typedef struct SAggLogicNode { @@ -213,6 +219,9 @@ typedef struct STableScanPhysiNode { int64_t sliding; int8_t intervalUnit; int8_t slidingUnit; + int8_t triggerType; + int64_t watermark; + int16_t tsColId; } STableScanPhysiNode; typedef STableScanPhysiNode STableSeqScanPhysiNode; @@ -295,6 +304,8 @@ typedef struct SSessionWinodwPhysiNode { int64_t gap; } SSessionWinodwPhysiNode; +typedef SSessionWinodwPhysiNode SStreamSessionWinodwPhysiNode; + typedef struct SStateWinodwPhysiNode { SWinodwPhysiNode window; SNode* pStateKey; @@ -343,6 +354,7 @@ typedef struct SSubplan { SNodeList* pParents; // the data destination subplan, get data from current subplan SPhysiNode* pNode; // physical plan of current subplan SDataSinkNode* pDataSink; // data of the subplan flow into the datasink + SNode* pTagCond; } SSubplan; typedef enum EExplainMode { EXPLAIN_MODE_DISABLE = 1, EXPLAIN_MODE_STATIC, EXPLAIN_MODE_ANALYZE } EExplainMode; diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h index 298dffcc839e22226a89932b2571a90ffaa197d0..ab5e10dc2ab632b57f9d6f313291d8cb5b04c6b0 100644 --- a/include/libs/nodes/querynodes.h +++ b/include/libs/nodes/querynodes.h @@ -81,6 +81,7 @@ typedef struct SValueNode { char* literal; bool isDuration; bool translate; + bool notReserved; int16_t placeholderNo; union { bool b; @@ -93,6 +94,10 @@ typedef 
struct SValueNode { char unit; } SValueNode; +typedef struct SLeftValueNode { + ENodeType type; +} SLeftValueNode; + typedef struct SOperatorNode { SExprNode node; // QUERY_NODE_OPERATOR EOperatorType opType; @@ -127,6 +132,7 @@ typedef struct STableNode { char tableName[TSDB_TABLE_NAME_LEN]; char tableAlias[TSDB_TABLE_NAME_LEN]; uint8_t precision; + bool singleTable; } STableNode; struct STableMeta; @@ -236,7 +242,9 @@ typedef struct SSelectStmt { bool isTimeOrderQuery; bool hasAggFuncs; bool hasRepeatScanFuncs; - bool hasNonstdSQLFunc; + bool hasIndefiniteRowsFunc; + bool hasSelectFunc; + bool hasSelectValFunc; } SSelectStmt; typedef enum ESetOperatorType { SET_OP_TYPE_UNION_ALL = 1, SET_OP_TYPE_UNION } ESetOperatorType; @@ -314,21 +322,22 @@ typedef enum EQueryExecMode { } EQueryExecMode; typedef struct SQuery { - ENodeType type; - EQueryExecMode execMode; - bool haveResultSet; - SNode* pRoot; - int32_t numOfResCols; - SSchema* pResSchema; - int8_t precision; - SCmdMsgInfo* pCmdMsg; - int32_t msgType; - SArray* pDbList; - SArray* pTableList; - bool showRewrite; - int32_t placeholderNum; - SArray* pPlaceholderValues; - SNode* pPrepareRoot; + ENodeType type; + EQueryExecMode execMode; + bool haveResultSet; + SNode* pRoot; + int32_t numOfResCols; + SSchema* pResSchema; + int8_t precision; + SCmdMsgInfo* pCmdMsg; + int32_t msgType; + SArray* pDbList; + SArray* pTableList; + bool showRewrite; + int32_t placeholderNum; + SArray* pPlaceholderValues; + SNode* pPrepareRoot; + struct SParseMetaCache* pMetaCache; } SQuery; void nodesWalkSelectStmt(SSelectStmt* pSelect, ESqlClause clause, FNodeWalker walker, void* pContext); diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h index 2d8fd9a93cadc1275e937ade9a8b859dcebe7dc9..06272b81514cec2a294da513ec2a57447ad74ef1 100644 --- a/include/libs/parser/parser.h +++ b/include/libs/parser/parser.h @@ -23,6 +23,9 @@ extern "C" { #include "query.h" #include "querynodes.h" +struct SCatalogReq; +struct SMetaData; + typedef struct SStmtCallback { TAOS_STMT* pStmt; int32_t (*getTbNameFn)(TAOS_STMT*, char**); @@ -45,14 +48,21 @@ typedef struct SParseContext { SStmtCallback* pStmtCb; const char* pUser; bool isSuperUser; + bool async; } SParseContext; int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery); -bool isInsertSql(const char* pStr, size_t length); +bool qIsInsertSql(const char* pStr, size_t length); + +// for async mode +int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq* pCatalogReq); +int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq, + const struct SMetaData* pMetaData, SQuery* pQuery); void qDestroyQuery(SQuery* pQueryNode); int32_t qExtractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema); +int32_t qSetSTableIdForRSma(SNode* pStmt, int64_t uid); int32_t qBuildStmtOutput(SQuery* pQuery, SHashObj* pVgHash, SHashObj* pBlockHash); int32_t qResetStmtDataBlock(void* block, bool keepBuf); diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h index 68a1e08f518f5c5e230076cd56344ea1161804cb..296b18e8dea7524244dcd8ade1a1149bfe97533d 100644 --- a/include/libs/qcom/query.h +++ b/include/libs/qcom/query.h @@ -43,6 +43,12 @@ typedef enum { TASK_TYPE_TEMP, } ETaskType; +typedef enum { + TARGET_TYPE_MNODE = 1, + TARGET_TYPE_VNODE, + TARGET_TYPE_OTHER, +} ETargetType; + typedef struct STableComInfo { uint8_t numOfTags; // the number of tags in schema uint8_t precision; // the number of precision @@ -126,11 +132,18 @@ typedef struct SDataBuf { 
void* handle; } SDataBuf; +typedef struct STargetInfo { + ETargetType type; + char dbFName[TSDB_DB_FNAME_LEN]; // used to update db's vgroup epset + int32_t vgId; +} STargetInfo; + typedef int32_t (*__async_send_cb_fn_t)(void* param, const SDataBuf* pMsg, int32_t code); typedef int32_t (*__async_exec_fn_t)(void* param); typedef struct SMsgSendInfo { __async_send_cb_fn_t fp; // async callback function + STargetInfo target; // for update epset void* param; uint64_t requestId; uint64_t requestObjRefId; @@ -180,7 +193,7 @@ char* jobTaskStatusStr(int32_t status); SSchema createSchema(int8_t type, int32_t bytes, col_id_t colId, const char* name); -extern int32_t (*queryBuildMsg[TDMT_MAX])(void* input, char** msg, int32_t msgSize, int32_t* msgLen); +extern int32_t (*queryBuildMsg[TDMT_MAX])(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallocFp)(int32_t)); extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t msgSize); #define SET_META_TYPE_NULL(t) (t) = META_TYPE_NULL_TABLE @@ -191,7 +204,7 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t #define NEED_CLIENT_RM_TBLMETA_ERROR(_code) \ ((_code) == TSDB_CODE_PAR_TABLE_NOT_EXIST || (_code) == TSDB_CODE_VND_TB_NOT_EXIST || \ (_code) == TSDB_CODE_PAR_INVALID_COLUMNS_NUM || (_code) == TSDB_CODE_PAR_INVALID_COLUMN || \ - (_code) == TSDB_CODE_PAR_TAGS_NOT_MATCHED) + (_code) == TSDB_CODE_PAR_TAGS_NOT_MATCHED || (_code == TSDB_CODE_PAR_VALUE_TOO_LONG)) #define NEED_CLIENT_REFRESH_VG_ERROR(_code) \ ((_code) == TSDB_CODE_VND_HASH_MISMATCH || (_code) == TSDB_CODE_VND_INVALID_VGROUP_ID) #define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) ((_code) == TSDB_CODE_TDB_TABLE_RECREATED) diff --git a/include/libs/qworker/qworker.h b/include/libs/qworker/qworker.h index 0846841cef1b509edf2ccc189bf9e81453169aa1..5942d00cb212002d5309cec4cba253dc7e3d7388 100644 --- a/include/libs/qworker/qworker.h +++ b/include/libs/qworker/qworker.h @@ -52,32 +52,24 @@ typedef struct { int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qWorkerMgmt, const SMsgCb *pMsgCb); -int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessDataSinkMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessReadyMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessStatusMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); - -int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); - -int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); - -int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); - -int32_t qWorkerProcessShowMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); - -int32_t 
qWorkerProcessShowFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); void qWorkerDestroy(void **qWorkerMgmt); +int64_t qWorkerGetWaitTimeInQueue(void *qWorkerMgmt, EQueueType type); + #ifdef __cplusplus } #endif diff --git a/include/libs/scheduler/scheduler.h b/include/libs/scheduler/scheduler.h index dcd058a293f0a35080335b30b38e32a792c43a74..0d32cce20b6e489249fa79080e6144754c17218b 100644 --- a/include/libs/scheduler/scheduler.h +++ b/include/libs/scheduler/scheduler.h @@ -23,6 +23,8 @@ extern "C" { #include "catalog.h" #include "planner.h" +extern tsem_t schdRspSem; + typedef struct SSchedulerCfg { uint32_t maxJobNum; int32_t maxNodeTableNum; @@ -54,8 +56,6 @@ typedef struct SQueryProfileSummary { typedef struct SQueryResult { int32_t code; uint64_t numOfRows; - int32_t msgSize; - char *msg; void *res; } SQueryResult; @@ -64,6 +64,15 @@ typedef struct STaskInfo { SSubQueryMsg *msg; } STaskInfo; +typedef struct SSchdFetchParam { + void **pData; + int32_t* code; +} SSchdFetchParam; + +typedef void (*schedulerExecCallback)(SQueryResult* pResult, void* param, int32_t code); +typedef void (*schedulerFetchCallback)(void* pResult, void* param, int32_t code); + + int32_t schedulerInit(SSchedulerCfg *cfg); /** @@ -80,7 +89,8 @@ int32_t schedulerExecJob(void *transport, SArray *nodeList, SQueryPlan *pDag, in * @param pNodeList Qnode/Vnode address list, element is SQueryNodeAddr * @return */ -int32_t schedulerAsyncExecJob(void *transport, SArray *pNodeList, SQueryPlan* pDag, const char* sql, int64_t *pJob); + int32_t schedulerAsyncExecJob(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql, + int64_t startTs, schedulerExecCallback fp, void* param); /** * Fetch query result from the remote query executor @@ -90,6 +100,8 @@ int32_t schedulerAsyncExecJob(void *transport, SArray *pNodeList, SQueryPlan* pD */ int32_t schedulerFetchRows(int64_t job, void **data); +int32_t schedulerAsyncFetchRows(int64_t job, schedulerFetchCallback fp, void* param); + int32_t schedulerGetTasksStatus(int64_t job, SArray *pSub); @@ -108,23 +120,8 @@ void schedulerFreeJob(int64_t job); void schedulerDestroy(void); -/** - * convert dag to task list - * @param pDag - * @param pTasks SArray** - * @return - */ -int32_t schedulerConvertDagToTaskList(SQueryPlan* pDag, SArray **pTasks); - -/** - * make one task info's multiple copies - * @param src - * @param dst SArray** - * @return - */ -int32_t schedulerCopyTask(STaskInfo *src, SArray **dst, int32_t copyNum); - -void schedulerFreeTaskList(SArray *taskList); +void schdExecCallback(SQueryResult* pResult, void* param, int32_t code); +void schdFetchCallback(void* pResult, void* param, int32_t code); #ifdef __cplusplus diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 1604749af8cb8f3073bd4b1ef46e30d45a37ddff..8aaf9a79dc5af256cfe089d8fc5f7b12856d2e71 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -84,15 +84,16 @@ typedef struct { } SStreamCheckpoint; static FORCE_INLINE SStreamDataSubmit* streamDataSubmitNew(SSubmitReq* pReq) { - SStreamDataSubmit* pDataSubmit = (SStreamDataSubmit*)taosMemoryCalloc(1, sizeof(SStreamDataSubmit)); + SStreamDataSubmit* pDataSubmit = (SStreamDataSubmit*)taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM); if (pDataSubmit == NULL) return NULL; - pDataSubmit->data = pReq; pDataSubmit->dataRef = (int32_t*)taosMemoryMalloc(sizeof(int32_t)); - if (pDataSubmit->data == 
NULL) goto FAIL; + if (pDataSubmit->dataRef == NULL) goto FAIL; + pDataSubmit->data = pReq; *pDataSubmit->dataRef = 1; + pDataSubmit->type = STREAM_INPUT__DATA_SUBMIT; return pDataSubmit; FAIL: - taosMemoryFree(pDataSubmit); + taosFreeQitem(pDataSubmit); return NULL; } @@ -107,24 +108,18 @@ static FORCE_INLINE void streamDataSubmitRefDec(SStreamDataSubmit* pDataSubmit) if (ref == 0) { taosMemoryFree(pDataSubmit->data); taosMemoryFree(pDataSubmit->dataRef); - // taosFreeQitem(pDataSubmit); } } int32_t streamDataBlockEncode(void** buf, const SStreamDataBlock* pOutput); void* streamDataBlockDecode(const void* buf, SStreamDataBlock* pInput); -typedef struct { - void* inputHandle; - void* executor; -} SStreamRunner; - typedef struct { int8_t parallelizable; char* qmsg; // followings are not applicable to encoder and decoder - int8_t numOfRunners; - SStreamRunner* runners; + void* inputHandle; + void* executor; } STaskExec; typedef struct { @@ -147,6 +142,7 @@ typedef void FTbSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data); typedef struct { int64_t stbUid; + char stbFullName[TSDB_TABLE_FNAME_LEN]; SSchemaWrapper* pSchemaWrapper; // not applicable to encoder and decoder void* vnode; @@ -320,17 +316,15 @@ int32_t streamEnqueueDataSubmit(SStreamTask* pTask, SStreamDataSubmit* input); int32_t streamEnqueueDataBlk(SStreamTask* pTask, SStreamDataBlock* input); int32_t streamDequeueOutput(SStreamTask* pTask, void** output); -int32_t streamExecTask(SStreamTask* pTask, SMsgCb* pMsgCb, const void* input, int32_t inputType, int32_t workId); - int32_t streamTaskRun(SStreamTask* pTask); int32_t streamTaskHandleInput(SStreamTask* pTask, void* data); int32_t streamTaskProcessRunReq(SStreamTask* pTask, SMsgCb* pMsgCb); -int32_t streamTaskProcessDispatchReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchReq* pReq, SRpcMsg* pMsg); -int32_t streamTaskProcessDispatchRsp(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchRsp* pRsp); -int32_t streamTaskProcessRecoverReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamTaskRecoverReq* pReq, SRpcMsg* pMsg); -int32_t streamTaskProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp); +int32_t streamProcessDispatchReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchReq* pReq, SRpcMsg* pMsg); +int32_t streamProcessDispatchRsp(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchRsp* pRsp); +int32_t streamProcessRecoverReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamTaskRecoverReq* pReq, SRpcMsg* pMsg); +int32_t streamProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp); #ifdef __cplusplus } diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index 9b6593e4b5bb4c8aad5018b3b92f73c7e1d52794..a587ad6ef22fb80538147a61980ae4cdadd8ec03 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -66,27 +66,53 @@ typedef struct SSyncCfg { SNodeInfo nodeInfo[TSDB_MAX_REPLICA]; } SSyncCfg; -typedef struct SSnapshot { - void* data; - SyncIndex lastApplyIndex; - SyncTerm lastApplyTerm; -} SSnapshot; - typedef struct SFsmCbMeta { SyncIndex index; bool isWeak; int32_t code; ESyncState state; uint64_t seqNum; + SyncTerm term; + SyncTerm currentTerm; + uint64_t flag; } SFsmCbMeta; +typedef struct SReConfigCbMeta { + int32_t code; + SyncIndex index; + SyncTerm term; + SyncTerm currentTerm; + SSyncCfg oldCfg; + bool isDrop; + uint64_t flag; +} SReConfigCbMeta; + +typedef struct SSnapshot { + void *data; + SyncIndex lastApplyIndex; + SyncTerm lastApplyTerm; +} SSnapshot; + typedef struct SSyncFSM { void* data; + void 
(*FpCommitCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta); void (*FpPreCommitCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta); void (*FpRollBackCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta); + + void (*FpRestoreFinishCb)(struct SSyncFSM* pFsm); + void (*FpReConfigCb)(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta); + int32_t (*FpGetSnapshot)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot); - int32_t (*FpRestoreSnapshot)(struct SSyncFSM* pFsm, const SSnapshot* snapshot); + + int32_t (*FpSnapshotStartRead)(struct SSyncFSM* pFsm, void** ppReader); + int32_t (*FpSnapshotStopRead)(struct SSyncFSM* pFsm, void* pReader); + int32_t (*FpSnapshotDoRead)(struct SSyncFSM* pFsm, void* pReader, void** ppBuf, int32_t* len); + + int32_t (*FpSnapshotStartWrite)(struct SSyncFSM* pFsm, void** ppWriter); + int32_t (*FpSnapshotStopWrite)(struct SSyncFSM* pFsm, void* pWriter, bool isApply); + int32_t (*FpSnapshotDoWrite)(struct SSyncFSM* pFsm, void* pWriter, void* pBuf, int32_t len); + } SSyncFSM; // abstract definition of log store in raft @@ -117,8 +143,8 @@ typedef struct SSyncLogStore { } SSyncLogStore; - typedef struct SSyncInfo { + bool isStandBy; SyncGroupId vgId; SSyncCfg syncCfg; char path[TSDB_FILENAME_LEN]; @@ -133,8 +159,8 @@ int32_t syncInit(); void syncCleanUp(); int64_t syncOpen(const SSyncInfo* pSyncInfo); void syncStart(int64_t rid); -void syncStartStandBy(int64_t rid); void syncStop(int64_t rid); +int32_t syncSetStandby(int64_t rid); int32_t syncReconfig(int64_t rid, const SSyncCfg* pSyncCfg); ESyncState syncGetMyRole(int64_t rid); const char* syncGetMyRoleStr(int64_t rid); @@ -144,6 +170,11 @@ int32_t syncGetVgId(int64_t rid); int32_t syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak); bool syncEnvIsStart(); const char* syncStr(ESyncState state); +bool syncIsRestoreFinish(int64_t rid); + +// to be moved to static +void syncStartNormal(int64_t rid); +void syncStartStandBy(int64_t rid); #ifdef __cplusplus } diff --git a/include/libs/transport/trpc.h b/include/libs/transport/trpc.h index fcb00ddf019d09866cba28d7865d800d970bf1f4..839194da94e5a184ab11b446077e334f085d68b5 100644 --- a/include/libs/transport/trpc.h +++ b/include/libs/transport/trpc.h @@ -28,7 +28,7 @@ extern "C" { #define TAOS_CONN_CLIENT 1 #define IsReq(pMsg) (pMsg->msgType & 1U) -extern int tsRpcHeadSize; +extern int32_t tsRpcHeadSize; typedef struct { uint32_t clientIp; @@ -69,10 +69,10 @@ typedef struct SRpcInit { char localFqdn[TSDB_FQDN_LEN]; uint16_t localPort; // local port char * label; // for debug purpose - int numOfThreads; // number of threads to handle connections - int sessions; // number of sessions allowed + int32_t numOfThreads; // number of threads to handle connections + int32_t sessions; // number of sessions allowed int8_t connType; // TAOS_CONN_UDP, TAOS_CONN_TCPC, TAOS_CONN_TCPS - int idleTime; // milliseconds, 0 means idle timer is disabled + int32_t idleTime; // milliseconds, 0 means idle timer is disabled // the following is for client app ecurity only char *user; // user name @@ -89,28 +89,27 @@ typedef struct SRpcInit { typedef struct { void *val; int32_t (*clone)(void *src, void **dst); - void (*freeFunc)(const void *arg); } SRpcCtxVal; typedef struct { int32_t msgType; void * val; int32_t (*clone)(void *src, void **dst); - void (*freeFunc)(const void *arg); } SRpcBrokenlinkVal; typedef struct { SHashObj * args; SRpcBrokenlinkVal brokenVal; + void (*freeFunc)(const void *arg); } SRpcCtx; int32_t rpcInit(); void rpcCleanup(); 
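The reworked SSyncFSM above drops the one-shot FpRestoreSnapshot callback in favor of start/do/stop reader and writer callbacks, so a snapshot can be streamed between nodes in chunks during recovery. A minimal reader-side sketch; the demo* names, the single-chunk payload, and the *len == 0 end-of-stream convention are illustrative assumptions, not taken from this patch:

#include "os.h"    // taosMemoryCalloc / taosMemoryFree
#include "sync.h"

typedef struct {
  int32_t done;  // hypothetical reader state: has the only chunk been handed out?
} SDemoSnapReader;

static int32_t demoSnapStartRead(struct SSyncFSM *pFsm, void **ppReader) {
  *ppReader = taosMemoryCalloc(1, sizeof(SDemoSnapReader));
  return (*ppReader != NULL) ? 0 : -1;
}

static int32_t demoSnapDoRead(struct SSyncFSM *pFsm, void *pReader, void **ppBuf, int32_t *len) {
  static char blob[] = "fsm-state";  // stand-in for a real serialized FSM snapshot
  SDemoSnapReader *pR = (SDemoSnapReader *)pReader;
  if (pR->done) {
    *ppBuf = NULL;
    *len = 0;  // assumption: zero length tells the caller the snapshot is exhausted
  } else {
    *ppBuf = blob;
    *len = (int32_t)sizeof(blob);
    pR->done = 1;
  }
  return 0;
}

static int32_t demoSnapStopRead(struct SSyncFSM *pFsm, void *pReader) {
  taosMemoryFree(pReader);
  return 0;
}

The writer side mirrors this with FpSnapshotStartWrite/FpSnapshotDoWrite/FpSnapshotStopWrite, with stop's isApply flag presumably deciding whether the received snapshot is installed into the FSM.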
void * rpcOpen(const SRpcInit *pRpc); void rpcClose(void *); -void * rpcMallocCont(int contLen); +void * rpcMallocCont(int32_t contLen); void rpcFreeCont(void *pCont); -void * rpcReallocCont(void *ptr, int contLen); +void * rpcReallocCont(void *ptr, int32_t contLen); // Because taosd supports multi-process mode // These functions should not be used on the server side @@ -121,10 +120,11 @@ void rpcRegisterBrokenLinkArg(SRpcMsg *msg); void rpcReleaseHandle(void *handle, int8_t type); // just release client conn to rpc instance, no close sock // These functions will not be called in the child process -void rpcSendRedirectRsp(void *pConn, const SEpSet *pEpSet); -void rpcSendRequestWithCtx(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid, SRpcCtx *ctx); -int rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo); -void rpcSendRecv(void *shandle, SEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp); +void rpcSendRedirectRsp(void *pConn, const SEpSet *pEpSet); +void rpcSendRequestWithCtx(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid, SRpcCtx *ctx); +int32_t rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo); +void rpcSendRecv(void *shandle, SEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp); +void rpcSetDefaultAddr(void *thandle, const char *ip, const char *fqdn); #ifdef __cplusplus } diff --git a/include/os/osDir.h b/include/os/osDir.h index b549acde3706bd68e4b3f18aa24fb056eb96a189..a4c686e2807ee3d1fb9a8a0e1e05066d1b616c0b 100644 --- a/include/os/osDir.h +++ b/include/os/osDir.h @@ -31,6 +31,12 @@ extern "C" { #endif +#ifdef WINDOWS +#define TD_TMP_DIR_PATH "C:\\Windows\\Temp\\" +#else +#define TD_TMP_DIR_PATH "/tmp/" +#endif + typedef struct TdDir *TdDirPtr; typedef struct TdDirEntry *TdDirEntryPtr; diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 1d7287ed0e954cf1f37b3e0dd395799747b080b3..65cfe8de0be9e387cecba70141c0bab513d6fc63 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -69,6 +69,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_DUP_KEY TAOS_DEF_ERROR_CODE(0, 0x0027) #define TSDB_CODE_NEED_RETRY TAOS_DEF_ERROR_CODE(0, 0x0028) #define TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE TAOS_DEF_ERROR_CODE(0, 0x0029) +#define TSDB_CODE_INVALID_TIMESTAMP TAOS_DEF_ERROR_CODE(0, 0x0030) #define TSDB_CODE_REF_NO_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0040) #define TSDB_CODE_REF_FULL TAOS_DEF_ERROR_CODE(0, 0x0041) @@ -253,6 +254,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_TRANS_INVALID_STAGE TAOS_DEF_ERROR_CODE(0, 0x03D2) #define TSDB_CODE_MND_TRANS_CONFLICT TAOS_DEF_ERROR_CODE(0, 0x03D3) #define TSDB_CODE_MND_TRANS_UNKNOW_ERROR TAOS_DEF_ERROR_CODE(0, 0x03D4) +#define TSDB_CODE_MND_TRANS_CLOG_IS_NULL TAOS_DEF_ERROR_CODE(0, 0x03D5) // mnode-mq #define TSDB_CODE_MND_TOPIC_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x03E0) @@ -267,6 +269,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_OFFSET_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x03E9) #define TSDB_CODE_MND_CONSUMER_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x03EA) #define TSDB_CODE_MND_TOPIC_SUBSCRIBED TAOS_DEF_ERROR_CODE(0, 0x03EB) +#define TSDB_CODE_MND_CGROUP_USED TAOS_DEF_ERROR_CODE(0, 0x03EC) // mnode-stream #define TSDB_CODE_MND_STREAM_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x03F0) @@ -312,6 +315,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_VND_INVALID_TABLE_ACTION TAOS_DEF_ERROR_CODE(0, 0x0519) #define TSDB_CODE_VND_COL_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x051a) #define TSDB_CODE_VND_TABLE_COL_NOT_EXISTS TAOS_DEF_ERROR_CODE(0, 0x051b) +#define TSDB_CODE_VND_READ_END TAOS_DEF_ERROR_CODE(0, 0x051c) // tsdb 
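The new TD_TMP_DIR_PATH macro in osDir.h above gives callers a platform-correct temporary directory instead of a hard-coded "/tmp/". A small usage sketch; the demo.sock file name is made up for illustration:

#include <stdio.h>
#include "osDir.h"

int main() {
  char path[256];
  // Yields /tmp/demo.sock on Linux and C:\Windows\Temp\demo.sock on Windows.
  snprintf(path, sizeof(path), "%s%s", TD_TMP_DIR_PATH, "demo.sock");
  printf("%s\n", path);
  return 0;
}

Because the macro expands to a string literal, compile-time concatenation such as TD_TMP_DIR_PATH "demo.sock" works as well.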
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600) @@ -419,6 +423,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_TQ_META_KEY_NOT_IN_TXN TAOS_DEF_ERROR_CODE(0, 0x0A09) #define TSDB_CODE_TQ_META_KEY_DUP_IN_TXN TAOS_DEF_ERROR_CODE(0, 0x0A0A) #define TSDB_CODE_TQ_GROUP_NOT_SET TAOS_DEF_ERROR_CODE(0, 0x0A0B) +#define TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND TAOS_DEF_ERROR_CODE(0, 0x0A0B) // wal #define TSDB_CODE_WAL_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x1000) @@ -636,6 +641,10 @@ int32_t* taosGetErrno(); #define TSDB_CODE_PAR_COMMENT_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x264E) #define TSDB_CODE_PAR_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x264F) #define TSDB_CODE_PAR_NOT_ALLOWED_WIN_QUERY TAOS_DEF_ERROR_CODE(0, 0x2650) +#define TSDB_CODE_PAR_INVALID_DROP_COL TAOS_DEF_ERROR_CODE(0, 0x2651) +#define TSDB_CODE_PAR_INVALID_COL_JSON TAOS_DEF_ERROR_CODE(0, 0x2652) +#define TSDB_CODE_PAR_VALUE_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x2653) +#define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2654) //planner #define TSDB_CODE_PLAN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2700) @@ -647,7 +656,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_FUNC_FUNTION_PARA_NUM TAOS_DEF_ERROR_CODE(0, 0x2801) #define TSDB_CODE_FUNC_FUNTION_PARA_TYPE TAOS_DEF_ERROR_CODE(0, 0x2802) #define TSDB_CODE_FUNC_FUNTION_PARA_VALUE TAOS_DEF_ERROR_CODE(0, 0x2803) -#define TSDB_CODE_FUNC_INVALID_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804) +#define TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804) //udf #define TSDB_CODE_UDF_STOPPING TAOS_DEF_ERROR_CODE(0, 0x2901) diff --git a/include/util/tdef.h b/include/util/tdef.h index 5cc687d7ab141c0eedabfcf6331f56af1a6175e5..ad7206f7bb1d5840a779cfcddbff6680538ad8d8 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -132,6 +132,7 @@ typedef enum EOperatorType { OP_TYPE_MOD, // unary arithmetic operator OP_TYPE_MINUS, + OP_TYPE_ASSIGN, // bit operator OP_TYPE_BIT_AND, @@ -233,6 +234,7 @@ typedef enum ELogicConditionType { #define TSDB_MAX_TAG_CONDITIONS 1024 #define TSDB_MAX_JSON_TAG_LEN 16384 +#define TSDB_MAX_JSON_KEY_LEN 256 #define TSDB_AUTH_LEN 16 #define TSDB_PASSWORD_LEN 32 @@ -245,7 +247,7 @@ typedef enum ELogicConditionType { #define TSDB_EP_LEN (TSDB_FQDN_LEN + 6) #define TSDB_IPv4ADDR_LEN 16 #define TSDB_FILENAME_LEN 128 -#define TSDB_SHOW_SQL_LEN 512 +#define TSDB_SHOW_SQL_LEN 1024 #define TSDB_SLOW_QUERY_SQL_LEN 512 #define TSDB_SHOW_SUBQUERY_LEN 1000 @@ -332,9 +334,12 @@ typedef enum ELogicConditionType { #define TSDB_DB_STREAM_MODE_OFF 0 #define TSDB_DB_STREAM_MODE_ON 1 #define TSDB_DEFAULT_DB_STREAM_MODE 0 -#define TSDB_DB_SINGLE_STABLE_ON 0 -#define TSDB_DB_SINGLE_STABLE_OFF 1 -#define TSDB_DEFAULT_DB_SINGLE_STABLE 0 +#define TSDB_DB_SINGLE_STABLE_ON 1 +#define TSDB_DB_SINGLE_STABLE_OFF 0 +#define TSDB_DEFAULT_DB_SINGLE_STABLE TSDB_DB_SINGLE_STABLE_OFF +#define TSDB_DB_SCHEMALESS_ON 1 +#define TSDB_DB_SCHEMALESS_OFF 0 +#define TSDB_DEFAULT_DB_SCHEMALESS TSDB_DB_SCHEMALESS_OFF #define TSDB_MIN_ROLLUP_FILE_FACTOR 0 #define TSDB_MAX_ROLLUP_FILE_FACTOR 1 @@ -426,11 +431,11 @@ enum { }; #define DEFAULT_HANDLE 0 -#define MNODE_HANDLE -1 -#define QNODE_HANDLE -2 -#define SNODE_HANDLE -3 -#define VNODE_HANDLE -4 -#define BNODE_HANDLE -5 +#define MNODE_HANDLE 1 +#define QNODE_HANDLE -1 +#define SNODE_HANDLE -2 +#define VNODE_HANDLE -3 +#define BNODE_HANDLE -4 #define TSDB_CONFIG_OPTION_LEN 16 #define TSDB_CONIIG_VALUE_LEN 48 diff --git a/include/util/tdigest.h b/include/util/tdigest.h new file mode 100644 index 
0000000000000000000000000000000000000000..f9b615318f5c33f0cf386653367ddfe36ae759f8
--- /dev/null
+++ b/include/util/tdigest.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * include/tdigest.c
+ *
+ * Copyright (c) 2016, Usman Masood
+ */
+
+#ifndef TDIGEST_H
+#define TDIGEST_H
+
+#ifndef M_PI
+#define M_PI 3.14159265358979323846264338327950288 /* pi */
+#endif
+
+#define DOUBLE_MAX 1.79e+308
+
+#define ADDITION_CENTROID_NUM 2
+#define COMPRESSION 300
+#define GET_CENTROID(compression) (ceil(compression * M_PI / 2) + 1 + ADDITION_CENTROID_NUM)
+#define GET_THRESHOLD(compression) (7.5 + 0.37 * compression - 2e-4 * pow(compression, 2))
+#define TDIGEST_SIZE(compression) (sizeof(TDigest) + sizeof(SCentroid)*GET_CENTROID(compression) + sizeof(SPt)*GET_THRESHOLD(compression))
+
+typedef struct SCentroid {
+  double  mean;
+  int64_t weight;
+}SCentroid;
+
+typedef struct SPt {
+  double  value;
+  int64_t weight;
+}SPt;
+
+typedef struct TDigest {
+  double  compression;
+  int32_t threshold;
+  int64_t size;
+
+  int64_t total_weight;
+  double  min;
+  double  max;
+
+  int32_t num_buffered_pts;
+  SPt    *buffered_pts;
+
+  int32_t    num_centroids;
+  SCentroid *centroids;
+}TDigest;
+
+TDigest *tdigestNewFrom(void* pBuf, int32_t compression);
+void tdigestAdd(TDigest *t, double x, int64_t w);
+void tdigestMerge(TDigest *t1, TDigest *t2);
+double tdigestQuantile(TDigest *t, double q);
+void tdigestCompress(TDigest *t);
+void tdigestFreeFrom(TDigest *t);
+void tdigestAutoFill(TDigest* t, int32_t compression);
+
+#endif /* TDIGEST_H */
diff --git a/include/util/tencode.h b/include/util/tencode.h
index 938e3018a86168d099db9e6ff2398b3cb4ebf1d5..cbacd59fa7873c4cb05b8fdaefb321ae3f854e5b 100644
--- a/include/util/tencode.h
+++ b/include/util/tencode.h
@@ -39,11 +39,11 @@ typedef struct {
 } SEncoder;
 
 typedef struct {
-  const uint8_t* data;
-  uint32_t       size;
-  uint32_t       pos;
-  SCoderMem*     mList;
-  SDecoderNode*  dStack;
+  uint8_t*      data;
+  uint32_t      size;
+  uint32_t      pos;
+  SCoderMem*    mList;
+  SDecoderNode* dStack;
 } SDecoder;
 
 #define tPut(TYPE, BUF, VAL) ((TYPE*)(BUF))[0] = (VAL)
@@ -82,7 +82,7 @@ typedef struct {
   do { \
     SEncoder coder = {0}; \
     tEncoderInit(&coder, NULL, 0); \
-    if ((E)(&coder, S) == 0) { \
+    if ((E)(&coder, S) >= 0) { \
     SIZE = coder.pos; \
     RET = 0; \
   } else { \
@@ -120,7 +120,7 @@ static int32_t tEncodeCStrWithLen(SEncoder* pCoder, const char* val, uint32_t le
 static int32_t tEncodeCStr(SEncoder* pCoder, const char* val);
 
 /* ------------------------ DECODE ------------------------ */
-void tDecoderInit(SDecoder* pCoder, const uint8_t* data, uint32_t size);
+void tDecoderInit(SDecoder* pCoder, uint8_t* data, uint32_t size);
 void tDecoderClear(SDecoder* SDecoder);
 int32_t tStartDecode(SDecoder* pCoder);
 void tEndDecode(SDecoder* pCoder);
@@ -141,9 +141,9 @@ static int32_t tDecodeU64v(SDecoder* pCoder, uint64_t* val);
 static int32_t tDecodeI64v(SDecoder* pCoder, int64_t* val);
 static int32_t tDecodeFloat(SDecoder* pCoder, float* val);
 static int32_t
tDecodeDouble(SDecoder* pCoder, double* val); -static int32_t tDecodeBinary(SDecoder* pCoder, const uint8_t** val, uint32_t* len); -static int32_t tDecodeCStrAndLen(SDecoder* pCoder, const char** val, uint32_t* len); -static int32_t tDecodeCStr(SDecoder* pCoder, const char** val); +static int32_t tDecodeBinary(SDecoder* pCoder, uint8_t** val, uint32_t* len); +static int32_t tDecodeCStrAndLen(SDecoder* pCoder, char** val, uint32_t* len); +static int32_t tDecodeCStr(SDecoder* pCoder, char** val); static int32_t tDecodeCStrTo(SDecoder* pCoder, char* val); /* ------------------------ IMPL ------------------------ */ @@ -317,7 +317,7 @@ static FORCE_INLINE int32_t tDecodeI16v(SDecoder* pCoder, int16_t* val) { if (tDecodeU16v(pCoder, &tval) < 0) { return -1; } - *val = ZIGZAGD(int16_t, tval); + if (val) *val = ZIGZAGD(int16_t, tval); return 0; } @@ -331,7 +331,7 @@ static FORCE_INLINE int32_t tDecodeI32v(SDecoder* pCoder, int32_t* val) { if (tDecodeU32v(pCoder, &tval) < 0) { return -1; } - *val = ZIGZAGD(int32_t, tval); + if (val) *val = ZIGZAGD(int32_t, tval); return 0; } @@ -345,7 +345,7 @@ static FORCE_INLINE int32_t tDecodeI64v(SDecoder* pCoder, int64_t* val) { if (tDecodeU64v(pCoder, &tval) < 0) { return -1; } - *val = ZIGZAGD(int64_t, tval); + if (val) *val = ZIGZAGD(int64_t, tval); return 0; } @@ -377,7 +377,7 @@ static FORCE_INLINE int32_t tDecodeDouble(SDecoder* pCoder, double* val) { return 0; } -static FORCE_INLINE int32_t tDecodeBinary(SDecoder* pCoder, const uint8_t** val, uint32_t* len) { +static FORCE_INLINE int32_t tDecodeBinary(SDecoder* pCoder, uint8_t** val, uint32_t* len) { if (tDecodeU32v(pCoder, len) < 0) return -1; if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, *len)) return -1; @@ -389,20 +389,20 @@ static FORCE_INLINE int32_t tDecodeBinary(SDecoder* pCoder, const uint8_t** val, return 0; } -static FORCE_INLINE int32_t tDecodeCStrAndLen(SDecoder* pCoder, const char** val, uint32_t* len) { - if (tDecodeBinary(pCoder, (const uint8_t**)val, len) < 0) return -1; +static FORCE_INLINE int32_t tDecodeCStrAndLen(SDecoder* pCoder, char** val, uint32_t* len) { + if (tDecodeBinary(pCoder, (uint8_t**)val, len) < 0) return -1; (*len) -= 1; return 0; } -static FORCE_INLINE int32_t tDecodeCStr(SDecoder* pCoder, const char** val) { +static FORCE_INLINE int32_t tDecodeCStr(SDecoder* pCoder, char** val) { uint32_t len; return tDecodeCStrAndLen(pCoder, val, &len); } static int32_t tDecodeCStrTo(SDecoder* pCoder, char* val) { - const char* pStr; - uint32_t len; + char* pStr; + uint32_t len; if (tDecodeCStrAndLen(pCoder, &pStr, &len) < 0) return -1; memcpy(val, pStr, len + 1); diff --git a/include/util/tlist.h b/include/util/tlist.h index 43833d7ecd84f09643546f3f3fa838edbd1dabf1..1954bda145a48f249875bda8ea3389b4fbed22be 100644 --- a/include/util/tlist.h +++ b/include/util/tlist.h @@ -229,7 +229,7 @@ int32_t tdListAppend(SList *list, void *data); SListNode *tdListPopHead(SList *list); SListNode *tdListPopTail(SList *list); SListNode *tdListGetHead(SList *list); -SListNode *tsListGetTail(SList *list); +SListNode *tdListGetTail(SList *list); SListNode *tdListPopNode(SList *list, SListNode *node); void tdListMove(SList *src, SList *dst); void tdListDiscard(SList *list); diff --git a/include/util/tlog.h b/include/util/tlog.h index be31aa8115ab91dabe898df45abdcba45b50d72d..988d9c6890832d17a7e9acd2b496e3ef6ba63d90 100644 --- a/include/util/tlog.h +++ b/include/util/tlog.h @@ -62,6 +62,7 @@ extern int32_t fsDebugFlag; extern int32_t metaDebugFlag; extern int32_t fnDebugFlag; extern int32_t smaDebugFlag; 
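A quick usage sketch for the t-digest quantile API introduced in include/util/tdigest.h above. The caller supplies the backing buffer, sized with TDIGEST_SIZE as the header's macros suggest; the end-to-end flow is illustrative and not taken from this patch:

#include <math.h>    // ceil/pow, used by the sizing macros
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "tdigest.h"

int main() {
  // Pre-allocate the digest in one contiguous block, as TDIGEST_SIZE implies.
  void *buf = calloc(1, (size_t)TDIGEST_SIZE(COMPRESSION));
  if (buf == NULL) return 1;
  TDigest *t = tdigestNewFrom(buf, COMPRESSION);
  for (int32_t i = 1; i <= 10000; ++i) {
    tdigestAdd(t, (double)i, 1);  // value, weight
  }
  tdigestCompress(t);  // fold any buffered points into centroids before querying
  printf("p50=%.1f p99=%.1f\n", tdigestQuantile(t, 0.5), tdigestQuantile(t, 0.99));
  free(buf);
  return 0;
}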
+extern int32_t idxDebugFlag; int32_t taosInitLog(const char *logName, int32_t maxFiles); void taosCloseLog(); @@ -88,6 +89,7 @@ void taosPrintLongString(const char *flags, ELogLevel level, int32_t dflag, cons #define uInfo(...) { if (uDebugFlag & DEBUG_INFO) { taosPrintLog("UTL ", DEBUG_INFO, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }} #define uDebug(...) { if (uDebugFlag & DEBUG_DEBUG) { taosPrintLog("UTL ", DEBUG_DEBUG, uDebugFlag, __VA_ARGS__); }} #define uTrace(...) { if (uDebugFlag & DEBUG_TRACE) { taosPrintLog("UTL ", DEBUG_TRACE, uDebugFlag, __VA_ARGS__); }} +#define uDebugL(...) { if (uDebugFlag & DEBUG_DEBUG) { taosPrintLongString("UTL ", DEBUG_DEBUG, uDebugFlag, __VA_ARGS__); }} #define pError(...) { taosPrintLog("APP ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); } #define pPrint(...) { taosPrintLog("APP ", DEBUG_INFO, 255, __VA_ARGS__); } diff --git a/include/util/tqueue.h b/include/util/tqueue.h index dbc4d03177e4c489240c04aac37710ce995102d4..466c577c0079d07774722ff2efdd30bf207e0fc3 100644 --- a/include/util/tqueue.h +++ b/include/util/tqueue.h @@ -46,6 +46,7 @@ typedef struct { void *ahandle; int32_t workerId; int32_t threadNum; + int64_t timestamp; } SQueueInfo; typedef enum { @@ -80,7 +81,7 @@ int32_t taosAddIntoQset(STaosQset *qset, STaosQueue *queue, void *ahandle); void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue); int32_t taosGetQueueNumber(STaosQset *qset); -int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FItem *itemFp); +int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void **ahandle, FItem *itemFp); int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, void **ahandle, FItems *itemsFp); void taosResetQsetThread(STaosQset *qset, void *pItem); diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 2b2667432447c11416efb94e45753c5dd9ff2a0c..f07705ff442b65f1295431e59b48ef50a76cadc0 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -6,16 +6,36 @@ set -e #set -x +verMode=edge +pagMode=full + +iplist="" +serverFqdn="" + # -----------------------Variables definition--------------------- script_dir=$(dirname $(readlink -f "$0")) # Dynamic directory -data_dir="/var/lib/taos" -log_dir="/var/log/taos" -data_link_dir="/usr/local/taos/data" -log_link_dir="/usr/local/taos/log" +clientName="taos" +serverName="taosd" +configFile="taos.cfg" +productName="TDengine" +emailName="taosdata.com" +uninstallScript="rmtaos" +historyFile="taos_history" +tarName="taos.tar.gz" +dataDir="/var/lib/taos" +logDir="/var/log/taos" +configDir="/etc/taos" +installDir="/usr/local/taos" +adapterName="taosadapter" +benchmarkName="taosBenchmark" +dumpName="taosdump" +demoName="taosdemo" -cfg_install_dir="/etc/taos" +data_dir=${dataDir} +log_dir=${logDir} +cfg_install_dir=${configDir} bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" @@ -23,21 +43,13 @@ lib64_link_dir="/usr/lib64" inc_link_dir="/usr/include" #install main path -install_main_dir="/usr/local/taos" - +install_main_dir=${installDir} # old bin dir -bin_dir="/usr/local/taos/bin" +bin_dir="${installDir}/bin" service_config_dir="/etc/systemd/system" - -#taos-tools para -demoName="taosdemo" -benchmarkName="taosBenchmark" -dumpName="taosdump" -emailName="taosdata.com" -taosName="taos" -toolsName="taostools" - +nginx_port=6060 +nginx_dir="/usr/local/nginxd" # Color setting RED='\033[0;31m' @@ -47,8 +59,8 @@ GREEN_UNDERLINE='\033[4;32m' NC='\033[0m' csudo="" -if command -v sudo > /dev/null; then - csudo="sudo" +if command -v sudo 
>/dev/null; then + csudo="sudo " fi update_flag=0 @@ -56,52 +68,51 @@ prompt_force=0 initd_mod=0 service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi -else +if pidof systemd &>/dev/null; then + service_mod=0 +elif $(which service &>/dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &>/dev/null); then + initd_mod=1 + elif $(which insserv &>/dev/null); then + initd_mod=2 + elif $(which update-rc.d &>/dev/null); then + initd_mod=3 + else service_mod=2 + fi +else + service_mod=2 fi - # get the operating system type for using the corresponding init file # ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification #osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) if [[ -e /etc/os-release ]]; then - osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||: + osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) || : else osinfo="" fi #echo "osinfo: ${osinfo}" os_type=0 -if echo $osinfo | grep -qwi "ubuntu" ; then -# echo "This is ubuntu system" +if echo $osinfo | grep -qwi "ubuntu"; then + # echo "This is ubuntu system" os_type=1 -elif echo $osinfo | grep -qwi "debian" ; then -# echo "This is debian system" +elif echo $osinfo | grep -qwi "debian"; then + # echo "This is debian system" os_type=1 -elif echo $osinfo | grep -qwi "Kylin" ; then -# echo "This is Kylin system" +elif echo $osinfo | grep -qwi "Kylin"; then + # echo "This is Kylin system" os_type=1 -elif echo $osinfo | grep -qwi "centos" ; then -# echo "This is centos system" +elif echo $osinfo | grep -qwi "centos"; then + # echo "This is centos system" os_type=2 -elif echo $osinfo | grep -qwi "fedora" ; then -# echo "This is fedora system" +elif echo $osinfo | grep -qwi "fedora"; then + # echo "This is fedora system" os_type=2 -elif echo $osinfo | grep -qwi "Linx" ; then -# echo "This is Linx system" +elif echo $osinfo | grep -qwi "Linx"; then + # echo "This is Linx system" os_type=1 service_mod=0 initd_mod=0 @@ -110,43 +121,41 @@ else echo " osinfo: ${osinfo}" echo " This is an officially unverified linux system," echo " if there are any problems with the installation and operation, " - echo " please feel free to contact taosdata.com for support." + echo " please feel free to contact ${emailName} for support." os_type=1 fi - # ============================= get input parameters ================================================= # install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...] # set parameters by default value -interactiveFqdn=yes # [yes | no] -verType=server # [server | client] -initType=systemd # [systemd | service | ...] +interactiveFqdn=yes # [yes | no] +verType=server # [server | client] +initType=systemd # [systemd | service | ...] -while getopts "hv:e:i:" arg -do +while getopts "hv:e:i:" arg; do case $arg in - e) - #echo "interactiveFqdn=$OPTARG" - interactiveFqdn=$( echo $OPTARG ) - ;; - v) - #echo "verType=$OPTARG" - verType=$(echo $OPTARG) - ;; - i) - #echo "initType=$OPTARG" - initType=$(echo $OPTARG) - ;; - h) - echo "Usage: `basename $0` -v [server | client] -e [yes | no]" - exit 0 - ;; - ?) 
#unknow option - echo "unkonw argument" - exit 1 - ;; + e) + #echo "interactiveFqdn=$OPTARG" + interactiveFqdn=$(echo $OPTARG) + ;; + v) + #echo "verType=$OPTARG" + verType=$(echo $OPTARG) + ;; + i) + #echo "initType=$OPTARG" + initType=$(echo $OPTARG) + ;; + h) + echo "Usage: $(basename $0) -v [server | client] -e [yes | no]" + exit 0 + ;; + ?) #unknow option + echo "unkonw argument" + exit 1 + ;; esac done @@ -155,98 +164,163 @@ done function kill_process() { pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then - ${csudo} kill -9 $pid || : + ${csudo}kill -9 $pid || : fi } function install_main_path() { - #create install main dir and all sub dir - ${csudo} rm -rf ${install_main_dir} || : - ${csudo} mkdir -p ${install_main_dir} - ${csudo} mkdir -p ${install_main_dir}/cfg - ${csudo} mkdir -p ${install_main_dir}/bin - ${csudo} mkdir -p ${install_main_dir}/connector - ${csudo} mkdir -p ${install_main_dir}/lib - ${csudo} mkdir -p ${install_main_dir}/examples - ${csudo} mkdir -p ${install_main_dir}/include - ${csudo} mkdir -p ${install_main_dir}/init.d - if [ "$verMode" == "cluster" ]; then - ${csudo} mkdir -p ${nginx_dir} - fi + #create install main dir and all sub dir + ${csudo}rm -rf ${install_main_dir} || : + ${csudo}mkdir -p ${install_main_dir} + ${csudo}mkdir -p ${install_main_dir}/cfg + ${csudo}mkdir -p ${install_main_dir}/bin + # ${csudo}mkdir -p ${install_main_dir}/connector + ${csudo}mkdir -p ${install_main_dir}/driver + ${csudo}mkdir -p ${install_main_dir}/examples + ${csudo}mkdir -p ${install_main_dir}/include + # ${csudo}mkdir -p ${install_main_dir}/init.d + if [ "$verMode" == "cluster" ]; then + ${csudo}mkdir -p ${nginx_dir} + fi - if [[ -e ${script_dir}/email ]]; then - ${csudo} cp ${script_dir}/email ${install_main_dir}/ ||: - fi + if [[ -e ${script_dir}/email ]]; then + ${csudo}cp ${script_dir}/email ${install_main_dir}/ || : + fi } function install_bin() { - # Remove links - ${csudo} rm -f ${bin_link_dir}/taos || : - ${csudo} rm -f ${bin_link_dir}/taosd || : - ${csudo} rm -f ${bin_link_dir}/taosadapter || : - ${csudo} rm -f ${bin_link_dir}/create_table || : - ${csudo} rm -f ${bin_link_dir}/tmq_sim || : - ${csudo} rm -f ${bin_link_dir}/taosdump || : - ${csudo} rm -f ${bin_link_dir}/rmtaos || : - #${csudo} rm -f ${bin_link_dir}/set_core || : - - ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* - - #Make link - [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : - [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : - [ -x ${install_main_dir}/bin/create_table ] && ${csudo} ln -s ${install_main_dir}/bin/create_table ${bin_link_dir}/create_table || : - [ -x ${install_main_dir}/bin/tmq_sim ] && ${csudo} ln -s ${install_main_dir}/bin/tmq_sim ${bin_link_dir}/tmq_sim || : -# [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : -# [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : - [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : -# [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : + # Remove links + ${csudo}rm -f ${bin_link_dir}/${clientName} || : + ${csudo}rm -f ${bin_link_dir}/${serverName} || : + 
${csudo}rm -f ${bin_link_dir}/${adapterName} || : + ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : + ${csudo}rm -f ${bin_link_dir}/tarbitrator || : + ${csudo}rm -f ${bin_link_dir}/set_core || : + ${csudo}rm -f ${bin_link_dir}/run_${serverName}_and_${adapterName}.sh || : + ${csudo}rm -f ${bin_link_dir}/TDinsight.sh || : + + ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* + + #Make link + [ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || : + [ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} || : + [ -x ${install_main_dir}/bin/${adapterName} ] && ${csudo}ln -s ${install_main_dir}/bin/${adapterName} ${bin_link_dir}/${adapterName} || : + [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${demoName} || : + [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || : + [ -x ${install_main_dir}/bin/${dumpName} ] && ${csudo}ln -s ${install_main_dir}/bin/${dumpName} ${bin_link_dir}/${dumpName} || : + [ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || : + [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || : + [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : + [ -x ${install_main_dir}/bin/run_${serverName}_and_${adapterName}.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_${serverName}_and_${adapterName}.sh ${bin_link_dir}/run_${serverName}_and_${adapterName}.sh || : + [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : + + if [ "$verMode" == "cluster" ]; then + ${csudo}cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo}chmod 0555 ${nginx_dir}/* + ${csudo}mkdir -p ${nginx_dir}/logs + ${csudo}chmod 777 ${nginx_dir}/sbin/nginx + fi } function install_lib() { - # Remove links - ${csudo} rm -f ${lib_link_dir}/libtaos.* || : - ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : - ${csudo} rm -f ${lib_link_dir}/libtdb.* || : - ${csudo} rm -f ${lib64_link_dir}/libtdb.* || : + # Remove links + ${csudo}rm -f ${lib_link_dir}/libtaos.* || : + ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : + #${csudo}rm -rf ${v15_java_app_dir} || : + ${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/* + + ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 + ${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + + if [[ -d ${lib64_link_dir} && ! 
-e ${lib64_link_dir}/libtaos.so ]]; then + ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : + ${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + fi - ${csudo} cp -rf ${script_dir}/lib/* ${install_main_dir}/lib && ${csudo} chmod 777 ${install_main_dir}/lib/* + ${csudo}ldconfig +} - ${csudo} ln -s ${install_main_dir}/lib/libtaos.* ${lib_link_dir}/libtaos.so.1 - ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so +function install_avro() { + if [ "$osType" != "Darwin" ]; then + avro_dir=${script_dir}/avro + if [ -f "${avro_dir}/lib/libavro.so.23.0.0" ] && [ -d /usr/local/$1 ]; then + ${csudo}/usr/bin/install -c -d /usr/local/$1 + ${csudo}/usr/bin/install -c -m 755 ${avro_dir}/lib/libavro.so.23.0.0 /usr/local/$1 + ${csudo}ln -sf /usr/local/$1/libavro.so.23.0.0 /usr/local/$1/libavro.so.23 + ${csudo}ln -sf /usr/local/$1/libavro.so.23 /usr/local/$1/libavro.so + + ${csudo}/usr/bin/install -c -d /usr/local/$1 + [ -f ${avro_dir}/lib/libavro.a ] && + ${csudo}/usr/bin/install -c -m 755 ${avro_dir}/lib/libavro.a /usr/local/$1 + + if [ -d /etc/ld.so.conf.d ]; then + echo "/usr/local/$1" | ${csudo}tee /etc/ld.so.conf.d/libavro.conf >/dev/null || echo -e "failed to write /etc/ld.so.conf.d/libavro.conf" + ${csudo}ldconfig + else + echo "/etc/ld.so.conf.d not found!" + fi + fi + fi +} - if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then - ${csudo} ln -s ${install_main_dir}/lib/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : - ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : +function install_jemalloc() { + jemalloc_dir=${script_dir}/jemalloc + + if [ -d ${jemalloc_dir} ]; then + ${csudo}/usr/bin/install -c -d /usr/local/bin + + if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jeprof ]; then + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin + fi + if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then + ${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc + ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then + ${csudo}/usr/bin/install -c -d /usr/local/lib + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib + ${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so + ${csudo}/usr/bin/install -c -d /usr/local/lib + if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig + ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig + fi + fi + if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then + ${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc + ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc + fi + if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 
]; then + ${csudo}/usr/bin/install -c -d /usr/local/share/man/man3 + ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3 fi - ${csudo} ldconfig + if [ -d /etc/ld.so.conf.d ]; then + echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf >/dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" + ${csudo}ldconfig + else + echo "/etc/ld.so.conf.d not found!" + fi + fi } function install_header() { - ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : - ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* - ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h -# ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h - ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h -} - -# temp install taosBenchmark -function install_taosTools() { - ${csudo} rm -f ${bin_link_dir}/${benchmarkName} || : - ${csudo} rm -f ${bin_link_dir}/${dumpName} || : - ${csudo} rm -f ${bin_link_dir}/rm${toolsName} || : - - ${csudo} /usr/bin/install -c -m 755 ${script_dir}/bin/${dumpName} ${install_main_dir}/bin/${dumpName} - ${csudo} /usr/bin/install -c -m 755 ${script_dir}/bin/${benchmarkName} ${install_main_dir}/bin/${benchmarkName} - ${csudo} ln -sf ${install_main_dir}/bin/${benchmarkName} ${install_main_dir}/bin/${demoName} - #Make link - [[ -x ${install_main_dir}/bin/${benchmarkName} ]] && \ - ${csudo} ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || : - [[ -x ${install_main_dir}/bin/${demoName} ]] && \ - ${csudo} ln -s ${install_main_dir}/bin/${demoName} ${bin_link_dir}/${demoName} || : - [[ -x ${install_main_dir}/bin/${dumpName} ]] && \ - ${csudo} ln -s ${install_main_dir}/bin/${dumpName} ${bin_link_dir}/${dumpName} || : + ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : + ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* + ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h + ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } function add_newHostname_to_hosts() { @@ -256,18 +330,17 @@ function add_newHostname_to_hosts() { iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') arr=($iphost) IFS="$OLD_IFS" - for s in "${arr[@]}" - do + for s in "${arr[@]}"; do if [[ "$s" == "$localIp" ]]; then return fi done - ${csudo} echo "127.0.0.1 $1" >> /etc/hosts ||: + ${csudo}echo "127.0.0.1 $1" >>/etc/hosts || : } function set_hostname() { echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:" - read newHostname + read newHostname while true; do if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then break @@ -276,28 +349,25 @@ function set_hostname() { fi done - ${csudo} hostname $newHostname ||: - retval=`echo $?` + ${csudo}hostname $newHostname || : + retval=$(echo $?) if [[ $retval != 0 ]]; then - echo - echo "set hostname fail!" - return + echo + echo "set hostname fail!" 
+ return fi - #echo -e -n "$(hostnamectl status --static)" - #echo -e -n "$(hostnamectl status --transient)" - #echo -e -n "$(hostnamectl status --pretty)" #ubuntu/centos /etc/hostname if [[ -e /etc/hostname ]]; then - ${csudo} echo $newHostname > /etc/hostname ||: + ${csudo}echo $newHostname >/etc/hostname || : fi #debian: #HOSTNAME=yourname if [[ -e /etc/sysconfig/network ]]; then - ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: + ${csudo}sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network || : fi - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg + ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/${configFile} serverFqdn=$newHostname if [[ -e /etc/hosts ]]; then @@ -311,20 +381,19 @@ function is_correct_ipaddr() { IFS=" " arr=($iplist) IFS="$OLD_IFS" - for s in "${arr[@]}" - do - if [[ "$s" == "$newIp" ]]; then - return 0 - fi + for s in "${arr[@]}"; do + if [[ "$s" == "$newIp" ]]; then + return 0 + fi done return 1 } function set_ipAsFqdn() { - iplist=$(ip address |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F "/" '{print $1}') ||: + iplist=$(ip address | grep inet | grep -v inet6 | grep -v 127.0.0.1 | awk '{print $2}' | awk -F "/" '{print $1}') || : if [ -z "$iplist" ]; then - iplist=$(ifconfig |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F ":" '{print $2}') ||: + iplist=$(ifconfig | grep inet | grep -v inet6 | grep -v 127.0.0.1 | awk '{print $2}' | awk -F ":" '{print $2}') || : fi if [ -z "$iplist" ]; then @@ -332,7 +401,7 @@ function set_ipAsFqdn() { echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" localFqdn="127.0.0.1" # Write the local FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/${configFile} serverFqdn=$localFqdn echo return @@ -345,23 +414,23 @@ function set_ipAsFqdn() { echo echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:" read localFqdn - while true; do - if [ ! -z "$localFqdn" ]; then - # Check if correct ip address - is_correct_ipaddr $localFqdn - retval=`echo $?` - if [[ $retval != 0 ]]; then - read -p "Please choose an IP from local IP list:" localFqdn - else - # Write the local FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg - serverFqdn=$localFqdn - break - fi - else + while true; do + if [ ! -z "$localFqdn" ]; then + # Check if correct ip address + is_correct_ipaddr $localFqdn + retval=$(echo $?) + if [[ $retval != 0 ]]; then read -p "Please choose an IP from local IP list:" localFqdn + else + # Write the local FQDN to configuration file + ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/${configFile} + serverFqdn=$localFqdn + break fi - done + else + read -p "Please choose an IP from local IP list:" localFqdn + fi + done } function local_fqdn_check() { @@ -369,205 +438,553 @@ function local_fqdn_check() { echo echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}" echo - if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then + if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}" echo - while true - do - read -r -p "Set hostname now? [Y/n] " input - if [ ! 
-n "$input" ]; then - set_hostname - break - else - case $input in - [yY][eE][sS]|[yY]) - set_hostname - break - ;; - - [nN][oO]|[nN]) - set_ipAsFqdn - break - ;; - - *) - echo "Invalid input..." - ;; - esac - fi + while true; do + read -r -p "Set hostname now? [Y/n] " input + if [ ! -n "$input" ]; then + set_hostname + break + else + case $input in + [yY][eE][sS] | [yY]) + set_hostname + break + ;; + + [nN][oO] | [nN]) + set_ipAsFqdn + break + ;; + + *) + echo "Invalid input..." + ;; + esac + fi done fi } +function install_adapter_config() { + if [ ! -f "${cfg_install_dir}/${adapterName}.toml" ]; then + ${csudo}mkdir -p ${cfg_install_dir} + [ -f ${script_dir}/cfg/${adapterName}.toml ] && ${csudo}cp ${script_dir}/cfg/${adapterName}.toml ${cfg_install_dir} + [ -f ${cfg_install_dir}/${adapterName}.toml ] && ${csudo}chmod 644 ${cfg_install_dir}/${adapterName}.toml + fi + + [ -f ${script_dir}/cfg/${adapterName}.toml ] && + ${csudo}cp -f ${script_dir}/cfg/${adapterName}.toml ${cfg_install_dir}/${adapterName}.toml.new + + [ -f ${cfg_install_dir}/${adapterName}.toml ] && + ${csudo}ln -s ${cfg_install_dir}/${adapterName}.toml ${install_main_dir}/cfg/${adapterName}.toml + + [ ! -z $1 ] && return 0 || : # only install client + +} + +function install_config() { + + if [ ! -f "${cfg_install_dir}/${configFile}" ]; then + ${csudo}mkdir -p ${cfg_install_dir} + [ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir} + ${csudo}chmod 644 ${cfg_install_dir}/* + fi + + ${csudo}cp -f ${script_dir}/cfg/${configFile} ${cfg_install_dir}/${configFile}.new + ${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg + + [ ! -z $1 ] && return 0 || : # only install client + + if ((${update_flag} == 1)); then + return 0 + fi + + if [ "$interactiveFqdn" == "no" ]; then + return 0 + fi + + local_fqdn_check + + echo + echo -e -n "${GREEN}Enter FQDN:port (like h1.${emailName}:6030) of an existing ${productName} cluster node to join${NC}" + echo + echo -e -n "${GREEN}OR leave it blank to build one${NC}:" + read firstEp + while true; do + if [ ! -z "$firstEp" ]; then + ${csudo}sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/${configFile} + break + else + break + fi + done + + echo + echo -e -n "${GREEN}Enter your email address for priority support or enter empty to skip${NC}: " + read emailAddr + while true; do + if [ ! -z "$emailAddr" ]; then + email_file="${install_main_dir}/email" + ${csudo}bash -c "echo $emailAddr > ${email_file}" + break + else + break + fi + done +} + function install_log() { - ${csudo} rm -rf ${log_dir} || : - ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + ${csudo}rm -rf ${log_dir} || : + ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} - ${csudo} ln -s ${log_dir} ${install_main_dir}/log + ${csudo}ln -s ${log_dir} ${install_main_dir}/log } function install_data() { - ${csudo} mkdir -p ${data_dir} + ${csudo}mkdir -p ${data_dir} - ${csudo} ln -s ${data_dir} ${install_main_dir}/data + ${csudo}ln -s ${data_dir} ${install_main_dir}/data } -function clean_service_on_systemd() { - taosd_service_config="${service_config_dir}/taosd.service" - if systemctl is-active --quiet taosd; then - echo "TDengine is running, stopping it..." 
- ${csudo} systemctl stop taosd &> /dev/null || echo &> /dev/null - fi - ${csudo} systemctl disable taosd &> /dev/null || echo &> /dev/null - ${csudo} rm -f ${taosd_service_config} +function install_connector() { + [ -d "${script_dir}/connector/" ] && ${csudo}cp -rf ${script_dir}/connector/ ${install_main_dir}/ +} + +function install_examples() { + if [ -d ${script_dir}/examples ]; then + ${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples + fi +} + +function clean_service_on_sysvinit() { + if pidof ${serverName} &>/dev/null; then + ${csudo}service ${serverName} stop || : + fi - tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - if systemctl is-active --quiet tarbitratord; then - echo "tarbitrator is running, stopping it..." - ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null + if pidof tarbitrator &>/dev/null; then + ${csudo}service tarbitratord stop || : + fi + + if ((${initd_mod} == 1)); then + if [ -e ${service_config_dir}/${serverName} ]; then + ${csudo}chkconfig --del ${serverName} || : fi - ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null - ${csudo} rm -f ${tarbitratord_service_config} - if [ "$verMode" == "cluster" ]; then - nginx_service_config="${service_config_dir}/nginxd.service" - if systemctl is-active --quiet nginxd; then - echo "Nginx for TDengine is running, stopping it..." - ${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null - fi - ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null - ${csudo} rm -f ${nginx_service_config} + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo}chkconfig --del tarbitratord || : + fi + elif ((${initd_mod} == 2)); then + if [ -e ${service_config_dir}/${serverName} ]; then + ${csudo}insserv -r ${serverName} || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo}insserv -r tarbitratord || : + fi + elif ((${initd_mod} == 3)); then + if [ -e ${service_config_dir}/${serverName} ]; then + ${csudo}update-rc.d -f ${serverName} remove || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo}update-rc.d -f tarbitratord remove || : fi + fi + + ${csudo}rm -f ${service_config_dir}/${serverName} || : + ${csudo}rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &>/dev/null); then + ${csudo}init q || : + fi +} + +function install_service_on_sysvinit() { + clean_service_on_sysvinit + sleep 1 + + if ((${os_type} == 1)); then + # ${csudo}cp -f ${script_dir}/init.d/${serverName}.deb ${install_main_dir}/init.d/${serverName} + ${csudo}cp ${script_dir}/init.d/${serverName}.deb ${service_config_dir}/${serverName} && ${csudo}chmod a+x ${service_config_dir}/${serverName} + # ${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord + ${csudo}cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord + elif ((${os_type} == 2)); then + # ${csudo}cp -f ${script_dir}/init.d/${serverName}.rpm ${install_main_dir}/init.d/${serverName} + ${csudo}cp ${script_dir}/init.d/${serverName}.rpm ${service_config_dir}/${serverName} && ${csudo}chmod a+x ${service_config_dir}/${serverName} + # ${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord + ${csudo}cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord + fi + + if ((${initd_mod} == 1)); then + ${csudo}chkconfig --add ${serverName} || : + 
${csudo}chkconfig --level 2345 ${serverName} on || : + ${csudo}chkconfig --add tarbitratord || : + ${csudo}chkconfig --level 2345 tarbitratord on || : + elif ((${initd_mod} == 2)); then + ${csudo}insserv ${serverName} || : + ${csudo}insserv -d ${serverName} || : + ${csudo}insserv tarbitratord || : + ${csudo}insserv -d tarbitratord || : + elif ((${initd_mod} == 3)); then + ${csudo}update-rc.d ${serverName} defaults || : + ${csudo}update-rc.d tarbitratord defaults || : + fi } -# taos:2345:respawn:/etc/init.d/taosd start +function clean_service_on_systemd() { + taosd_service_config="${service_config_dir}/${serverName}.service" + if systemctl is-active --quiet ${serverName}; then + echo "${productName} is running, stopping it..." + ${csudo}systemctl stop ${serverName} &>/dev/null || echo &>/dev/null + fi + ${csudo}systemctl disable ${serverName} &>/dev/null || echo &>/dev/null + ${csudo}rm -f ${taosd_service_config} + + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + if systemctl is-active --quiet tarbitratord; then + echo "tarbitrator is running, stopping it..." + ${csudo}systemctl stop tarbitratord &>/dev/null || echo &>/dev/null + fi + ${csudo}systemctl disable tarbitratord &>/dev/null || echo &>/dev/null + ${csudo}rm -f ${tarbitratord_service_config} + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/nginxd.service" + if systemctl is-active --quiet nginxd; then + echo "Nginx for ${productName} is running, stopping it..." + ${csudo}systemctl stop nginxd &>/dev/null || echo &>/dev/null + fi + ${csudo}systemctl disable nginxd &>/dev/null || echo &>/dev/null + ${csudo}rm -f ${nginx_service_config} + fi +} function install_service_on_systemd() { - clean_service_on_systemd - - taosd_service_config="${service_config_dir}/taosd.service" - ${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'After=network-online.target taosadapter.service' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'Wants=network-online.target taosadapter.service' >> ${taosd_service_config}" - ${csudo} bash -c "echo >> ${taosd_service_config}" - ${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'StandardOutput=null' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'Restart=always' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${taosd_service_config}" - #${csudo} bash -c "echo 'StartLimitIntervalSec=60s' >> ${taosd_service_config}" - ${csudo} bash -c "echo >> ${taosd_service_config}" - ${csudo} bash -c "echo '[Install]' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}" - ${csudo} systemctl enable taosd - - ${csudo} 
systemctl daemon-reload + clean_service_on_systemd + + [ -f ${script_dir}/cfg/${serverName}.service ] && + ${csudo}cp ${script_dir}/cfg/${serverName}.service \ + ${service_config_dir}/ || : + ${csudo}systemctl daemon-reload + + ${csudo}systemctl enable ${serverName} + + [ -f ${script_dir}/cfg/tarbitratord.service ] && + ${csudo}cp ${script_dir}/cfg/tarbitratord.service \ + ${service_config_dir}/ || : + ${csudo}systemctl daemon-reload + + if [ "$verMode" == "cluster" ]; then + [ -f ${script_dir}/cfg/nginxd.service ] && + ${csudo}cp ${script_dir}/cfg/nginxd.service \ + ${service_config_dir}/ || : + ${csudo}systemctl daemon-reload + + if ! ${csudo}systemctl enable nginxd &>/dev/null; then + ${csudo}systemctl daemon-reexec + ${csudo}systemctl enable nginxd + fi + ${csudo}systemctl start nginxd + fi +} + +function install_adapter_service() { + if ((${service_mod} == 0)); then + [ -f ${script_dir}/cfg/${adapterName}.service ] && + ${csudo}cp ${script_dir}/cfg/${adapterName}.service \ + ${service_config_dir}/ || : + ${csudo}systemctl daemon-reload + fi } function install_service() { - # if ((${service_mod}==0)); then - # install_service_on_systemd - # elif ((${service_mod}==1)); then - # install_service_on_sysvinit - # else - # # must manual stop taosd - kill_process taosd - # fi + if ((${service_mod} == 0)); then + install_service_on_systemd + elif ((${service_mod} == 1)); then + install_service_on_sysvinit + else + kill_process ${serverName} + fi } -function install_config() { - if [ ! -f ${cfg_install_dir}/${configFile} ]; then - ${csudo}mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir} - ${csudo}chmod 644 ${cfg_install_dir}/* +vercomp() { + if [[ $1 == $2 ]]; then + return 0 + fi + local IFS=. + local i ver1=($1) ver2=($2) + # fill empty fields in ver1 with zeros + for ((i = ${#ver1[@]}; i < ${#ver2[@]}; i++)); do + ver1[i]=0 + done + + for ((i = 0; i < ${#ver1[@]}; i++)); do + if [[ -z ${ver2[i]} ]]; then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})); then + return 1 fi + if ((10#${ver1[i]} < 10#${ver2[i]})); then + return 2 + fi + done + return 0 +} + +function is_version_compatible() { + + curr_version=$(ls ${script_dir}/driver/libtaos.so* | awk -F 'libtaos.so.' '{print $2}') - ${csudo}cp -f ${script_dir}/cfg/${configFile} ${install_main_dir}/cfg/${configFile}.org - ${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg + if [ -f ${script_dir}/driver/vercomp.txt ]; then + min_compatible_version=$(cat ${script_dir}/driver/vercomp.txt) + else + min_compatible_version=$(${script_dir}/bin/${serverName} -V | head -1 | cut -d ' ' -f 5) + fi + + exist_version=$(${installDir}/bin/${serverName} -V | head -1 | cut -d ' ' -f 3) + vercomp $exist_version "2.0.16.0" + case $? in + 2) + prompt_force=1 + ;; + esac + + vercomp $curr_version $min_compatible_version + echo "" # avoid $? value not update + + case $? in + 0) return 0 ;; + 1) return 0 ;; + 2) return 1 ;; + esac } -function install_TDengine() { - # Start to install - echo -e "${GREEN}Start to install TDengine...${NC}" +function updateProduct() { + # Check if version compatible + if ! is_version_compatible; then + echo -e "${RED}Version incompatible${NC}" + return 1 + fi + + # Start to update + if [ ! 
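The vercomp helper above compares dotted version strings field by field, padding the shorter string with zeros and forcing base 10 so a leading zero is not parsed as octal; is_version_compatible() then maps return code 2 (packaged version older than the minimum compatible one) to a refusal to update. A minimal standalone C sketch of the same contract, assuming well-formed dotted versions (the function name is reused only for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    /* Compare dotted versions: 0 = equal, 1 = a > b, 2 = a < b,
     * mirroring the shell function's return codes. */
    static int vercomp(const char *a, const char *b) {
      while (*a != '\0' || *b != '\0') {
        char *end = NULL;
        long x = (*a != '\0') ? strtol(a, &end, 10) : 0;  /* missing fields read as 0 */
        if (*a != '\0') a = end;
        long y = (*b != '\0') ? strtol(b, &end, 10) : 0;
        if (*b != '\0') b = end;
        if (x > y) return 1;
        if (x < y) return 2;
        if (*a == '.') a++;  /* step past the field separator */
        if (*b == '.') b++;
      }
      return 0;
    }

    int main(void) {
      printf("%d\n", vercomp("2.0.16.0", "2.0.16"));   /* 0: trailing zeros ignored */
      printf("%d\n", vercomp("3.0.1", "2.9.9"));       /* 1 */
      printf("%d\n", vercomp("2.0.15.9", "2.0.16.0")); /* 2: would set prompt_force */
      return 0;
    }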
-e ${tarName} ]; then + echo "File ${tarName} does not exist" + exit 1 + fi + tar -zxf ${tarName} + install_jemalloc + + echo -e "${GREEN}Start to update ${productName}...${NC}" + # Stop the service if running + if pidof ${serverName} &>/dev/null; then + if ((${service_mod} == 0)); then + ${csudo}systemctl stop ${serverName} || : + elif ((${service_mod} == 1)); then + ${csudo}service ${serverName} stop || : + else + kill_process ${serverName} + fi + sleep 1 + fi - install_main_path - install_data - install_log - install_header - install_lib - install_taosTools - - if [ -z $1 ]; then # install service and client - # For installing new - install_bin - install_service - install_config - - # Ask if to start the service - #echo - #echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" - echo - echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} service taosd start${NC}" + if [ "$verMode" == "cluster" ]; then + if pidof nginx &>/dev/null; then + if ((${service_mod} == 0)); then + ${csudo}systemctl stop nginxd || : + elif ((${service_mod} == 1)); then + ${csudo}service nginxd stop || : + else + kill_process nginx + fi + sleep 1 + fi + fi + + install_main_path + + install_log + install_header + install_lib + + if [ "$verMode" == "cluster" ]; then + install_connector + fi + + install_examples + if [ -z $1 ]; then + install_bin + install_service + install_adapter_service + install_config + install_adapter_config + + openresty_work=false + if [ "$verMode" == "cluster" ]; then + # Check if openresty is installed + # Check if nginx is installed successfully + if type curl &>/dev/null; then + if curl -sSf http://127.0.0.1:${nginx_port} &>/dev/null; then + echo -e "\033[44;32;1mNginx for ${productName} is updated successfully!${NC}" + openresty_work=true else - echo -e "${GREEN_DARK}To start TDengine ${NC}: taosd${NC}" + echo -e "\033[44;31;5mNginx for ${productName} does not work! Please try again!\033[0m" fi + fi + fi - if [ ! -z "$firstEp" ]; then - tmpFqdn=${firstEp%%:*} - substr=":" - if [[ $firstEp =~ $substr ]];then - tmpPort=${firstEp#*:} - else - tmpPort="" - fi - if [[ "$tmpPort" != "" ]];then - echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}" - else - echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}" - fi - echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" - echo - elif [ ! 
-z "$serverFqdn" ]; then - echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $serverFqdn${GREEN_DARK} to login into TDengine server${NC}" - echo - fi + echo + echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${cfg_install_dir}/${configFile}" + echo -e "${GREEN_DARK}To configure Adapter (if has) ${NC}: edit ${cfg_install_dir}/${adapterName}.toml" + if ((${service_mod} == 0)); then + echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" + elif ((${service_mod} == 1)); then + echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" + else + echo -e "${GREEN_DARK}To start Adapter (if has)${NC}: ${adapterName} &${NC}" + echo -e "${GREEN_DARK}To start ${productName} ${NC}: ./${serverName}${NC}" + fi + + if [ ${openresty_work} = 'true' ]; then + echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName} -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" + else + echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName} -h $serverFqdn${NC} in shell${NC}" + fi - echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" - echo - else # Only install client - install_bin - install_config - echo - echo -e "\033[44;32;1mTDengine client is installed successfully!${NC}" + if ((${prompt_force} == 1)); then + echo "" + echo -e "${RED}Please run '${serverName} --force-keep-file' at first time for the exist ${productName} $exist_version!${NC}" fi + echo + echo -e "\033[44;32;1m${productName} is updated successfully!${NC}" + else + install_bin + install_config + + echo + echo -e "\033[44;32;1m${productName} client is updated successfully!${NC}" + fi - touch ~/.taos_history + rm -rf $(tar -tf ${tarName} | grep -v "^\./$") } +function installProduct() { + # Start to install + if [ ! -e ${tarName} ]; then + echo "File ${tarName} does not exist" + exit 1 + fi + tar -zxf ${tarName} + + echo -e "${GREEN}Start to install ${productName}...${NC}" + + install_main_path + + if [ -z $1 ]; then + install_data + fi + + install_log + install_header + install_lib + install_jemalloc + #install_avro lib + #install_avro lib64 + + if [ "$verMode" == "cluster" ]; then + install_connector + fi + install_examples + + if [ -z $1 ]; then # install service and client + # For installing new + install_bin + install_service + install_adapter_service + install_adapter_config + + openresty_work=false + if [ "$verMode" == "cluster" ]; then + # Check if nginx is installed successfully + if type curl &>/dev/null; then + if curl -sSf http://127.0.0.1:${nginx_port} &>/dev/null; then + echo -e "\033[44;32;1mNginx for ${productName} is installed successfully!${NC}" + openresty_work=true + else + echo -e "\033[44;31;5mNginx for ${productName} does not work! 
Please try again!\033[0m" + fi + fi + fi + + install_config + + # Ask whether to start the service + echo + echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${cfg_install_dir}/${configFile}" + echo -e "${GREEN_DARK}To configure ${adapterName} (if installed) ${NC}: edit ${cfg_install_dir}/${adapterName}.toml" + if ((${service_mod} == 0)); then + echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" + elif ((${service_mod} == 1)); then + echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" + else + echo -e "${GREEN_DARK}To start Adapter (if installed)${NC}: ${adapterName} &${NC}" + echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}" + fi + + if [ ! -z "$firstEp" ]; then + tmpFqdn=${firstEp%%:*} + substr=":" + if [[ $firstEp =~ $substr ]]; then + tmpPort=${firstEp#*:} + else + tmpPort="" + fi + if [[ "$tmpPort" != "" ]]; then + echo -e "${GREEN_DARK}To access ${productName} ${NC}: ${clientName} -h $tmpFqdn -P $tmpPort${GREEN_DARK} to log in to the cluster, then${NC}" + else + echo -e "${GREEN_DARK}To access ${productName} ${NC}: ${clientName} -h $tmpFqdn${GREEN_DARK} to log in to the cluster, then${NC}" + fi + echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" + echo + elif [ ! -z "$serverFqdn" ]; then + echo -e "${GREEN_DARK}To access ${productName} ${NC}: ${clientName} -h $serverFqdn${GREEN_DARK} to log in to the ${productName} server${NC}" + echo + fi + + echo -e "\033[44;32;1m${productName} is installed successfully!${NC}" + echo + else # Only install client + install_bin + install_config + echo + echo -e "\033[44;32;1m${productName} client is installed successfully!${NC}" + fi + + touch ~/.${historyFile} + rm -rf $(tar -tf ${tarName} | grep -v "^\./$") +} ## ==============================Main program starts from here============================ serverFqdn=$(hostname) if [ "$verType" == "server" ]; then - # Install server and client - install_TDengine + # Install server and client + if [ -x ${bin_dir}/${serverName} ]; then + update_flag=1 + updateProduct + else + installProduct + fi elif [ "$verType" == "client" ]; then - interactiveFqdn=no - # Only install client - install_TDengine client + interactiveFqdn=no + # Only install client + if [ -x ${bin_dir}/${clientName} ]; then + update_flag=1 + updateProduct client + else + installProduct client + fi else - echo "please input correct verType" + echo "Please input a correct verType: server or client" fi diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index 4cf95454e022da6f8d3e497d335175d86da486c5..5f449e5d91122522d595eb2ccfb948aa4f8a66fe 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -17,6 +17,7 @@ serverName="taosd" clientName="taos" uninstallScript="rmtaos" configFile="taos.cfg" +tarName="taos.tar.gz" osType=Linux pagMode=full @@ -242,6 +243,11 @@ function install_examples() { function update_TDengine() { # Start to update + if [ ! 
-e ${tarName} ]; then + echo "File ${tarName} does not exist" + exit 1 + fi + tar -zxf ${tarName} echo -e "${GREEN}Start to update ${productName} client...${NC}" # Stop the client shell if running if pidof ${clientName} &> /dev/null; then @@ -264,42 +270,49 @@ function update_TDengine() { echo echo -e "\033[44;32;1m${productName} client is updated successfully!${NC}" + + rm -rf $(tar -tf ${tarName}) } function install_TDengine() { - # Start to install - echo -e "${GREEN}Start to install ${productName} client...${NC}" - - install_main_path - install_log - install_header - install_lib - install_jemalloc - if [ "$verMode" == "cluster" ]; then - install_connector - fi - install_examples - install_bin - install_config + # Start to install + if [ ! -e ${tarName} ]; then + echo "File ${tarName} does not exist" + exit 1 + fi + tar -zxf ${tarName} + echo -e "${GREEN}Start to install ${productName} client...${NC}" - echo - echo -e "\033[44;32;1m${productName} client is installed successfully!${NC}" + install_main_path + install_log + install_header + install_lib + install_jemalloc + if [ "$verMode" == "cluster" ]; then + install_connector + fi + install_examples + install_bin + install_config + + echo + echo -e "\033[44;32;1m${productName} client is installed successfully!${NC}" - rm -rf $(tar -tf ${tarName}) + rm -rf $(tar -tf ${tarName}) } ## ==============================Main program starts from here============================ # Install or updata client and client # if server is already install, don't install client - if [ -e ${bin_dir}/${serverName} ]; then - echo -e "\033[44;32;1mThere are already installed ${productName} server, so don't need install client!${NC}" - exit 0 - fi +if [ -e ${bin_dir}/${serverName} ]; then + echo -e "\033[44;32;1mThere are already installed ${productName} server, so don't need install client!${NC}" + exit 0 +fi - if [ -x ${bin_dir}/${clientName} ]; then - update_flag=1 - update_TDengine - else - install_TDengine - fi +if [ -x ${bin_dir}/${clientName} ]; then + update_flag=1 + update_TDengine +else + install_TDengine +fi diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index 516b289f08d1c36dcbc3e7f513da6d72f727fa81..d9f33510088cf228215edf0f77368334edd4b956 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -121,7 +121,7 @@ struct SAppInstInfo { SCorEpSet mgmtEp; SInstanceSummary summary; SList* pConnList; // STscObj linked list - int64_t clusterId; + uint64_t clusterId; void* pTransporter; SAppHbMgr* pAppHbMgr; }; @@ -286,6 +286,8 @@ void initMsgHandleFp(); TAOS* taos_connect_internal(const char* ip, const char* user, const char* pass, const char* auth, const char* db, uint16_t port, int connType); +SRequestObj* launchQuery(STscObj* pTscObj, const char* sql, int sqlLen); + int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtCallback* pStmtCb); int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArray* pNodeList); diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c index b068e13b7d819230c2cf09cdf267b2d7dcee7cde..a9c5cd06f668ba625dee6d13c44261ef2badf8bb 100644 --- a/source/client/src/clientHb.c +++ b/source/client/src/clientHb.c @@ -140,8 +140,10 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) { STscObj *pTscObj = (STscObj *)acquireTscObj(pRsp->connKey.tscRid); if (NULL == pTscObj) { tscDebug("tscObj rid %" PRIx64 " not exist", pRsp->connKey.tscRid); - } else { - updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, 
&pRsp->query->epSet); + } else { + if (pRsp->query->totalDnodes > 1 && !isEpsetEqual(&pTscObj->pAppInfo->mgmtEp.epSet, &pRsp->query->epSet)) { + updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, &pRsp->query->epSet); + } pTscObj->connId = pRsp->query->connId; if (pRsp->query->killRid) { @@ -310,6 +312,8 @@ int32_t hbBuildQueryDesc(SQueryHbReqBasic *hbBasic, STscObj *pObj) { taosArrayDestroy(desc.subDesc); desc.subDesc = NULL; } + } else { + desc.subDesc = NULL; } releaseRequest(*rid); @@ -578,8 +582,15 @@ void hbClearReqInfo(SAppHbMgr *pAppHbMgr) { } } +void hbThreadFuncUnexpectedStopped(void) { + atomic_store_8(&clientHbMgr.threadStop, 2); +} + static void *hbThreadFunc(void *param) { setThreadName("hb"); +#ifdef WINDOWS + atexit(hbThreadFuncUnexpectedStopped); +#endif while (1) { int8_t threadStop = atomic_val_compare_exchange_8(&clientHbMgr.threadStop, 1, 2); if (1 == threadStop) { diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index b1428858418a499a0260376eb4b1b6af60c06ea7..eb4c4cb59feac8c8a0db6cd85f45f3482b31e96f 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -289,10 +289,56 @@ void setResPrecision(SReqResultInfo* pResInfo, int32_t precision) { pResInfo->precision = precision; } +int32_t scheduleAsyncQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList, void** pRes) { + void* pTransporter = pRequest->pTscObj->pAppInfo->pTransporter; + + tsem_init(&schdRspSem, 0, 0); + + SQueryResult res = {.code = 0, .numOfRows = 0}; + int32_t code = schedulerAsyncExecJob(pTransporter, pNodeList, pDag, &pRequest->body.queryJob, pRequest->sqlstr, + pRequest->metric.start, schdExecCallback, &res); + while (true) { + if (code != TSDB_CODE_SUCCESS) { + if (pRequest->body.queryJob != 0) { + schedulerFreeJob(pRequest->body.queryJob); + } + + *pRes = res.res; + + pRequest->code = code; + terrno = code; + return pRequest->code; + } else { + tsem_wait(&schdRspSem); + + if (res.code) { + code = res.code; + } else { + break; + } + } + } + + if (TDMT_VND_SUBMIT == pRequest->type || TDMT_VND_CREATE_TABLE == pRequest->type) { + pRequest->body.resInfo.numOfRows = res.numOfRows; + + if (pRequest->body.queryJob != 0) { + schedulerFreeJob(pRequest->body.queryJob); + } + } + + *pRes = res.res; + + pRequest->code = res.code; + terrno = res.code; + return pRequest->code; +} + + int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList, void** pRes) { void* pTransporter = pRequest->pTscObj->pAppInfo->pTransporter; - SQueryResult res = {.code = 0, .numOfRows = 0, .msgSize = ERROR_MSG_BUF_DEFAULT_SIZE, .msg = pRequest->msgBuf}; + SQueryResult res = {.code = 0, .numOfRows = 0}; int32_t code = schedulerExecJob(pTransporter, pNodeList, pDag, &pRequest->body.queryJob, pRequest->sqlstr, pRequest->metric.start, &res); if (code != TSDB_CODE_SUCCESS) { @@ -348,8 +394,8 @@ int32_t validateSversion(SRequestObj* pRequest, void* res) { if (NULL == blk->tblFName || 0 == blk->tblFName[0]) { continue; } - - STbSVersion tbSver = {.tbFName = blk->tblFName, .sver = blk->sver}; + + STbSVersion tbSver = {.tbFName = blk->tblFName, .sver = blk->sver}; taosArrayPush(pArray, &tbSver); } } else if (TDMT_VND_QUERY == pRequest->type) { @@ -367,7 +413,7 @@ int32_t validateSversion(SRequestObj* pRequest, void* res) { for (int32_t i = 0; i < tbNum; ++i) { STbVerInfo* tbInfo = taosArrayGet(pTbArray, i); - STbSVersion tbSver = {.tbFName = tbInfo->tbFName, .sver = tbInfo->sversion}; + STbSVersion tbSver = {.tbFName = tbInfo->tbFName, .sver = tbInfo->sversion, .tver 
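scheduleAsyncQuery() above turns the asynchronous scheduler API back into a blocking call: it submits the job with a completion callback, then parks on a semaphore that the callback posts. A minimal POSIX sketch of that bridge, with plain pthreads standing in for schedulerAsyncExecJob and tsem_wait (all names here are stand-ins):

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>

    typedef struct {
      sem_t done;
      int   code;  /* result code filled in by the completion callback */
    } SWaitCtx;

    static void onJobDone(SWaitCtx *ctx, int code) {
      ctx->code = code;
      sem_post(&ctx->done);  /* wake the waiting caller */
    }

    static void *worker(void *arg) {  /* stands in for the scheduler thread */
      onJobDone((SWaitCtx *)arg, 0);
      return NULL;
    }

    int main(void) {
      SWaitCtx ctx;
      sem_init(&ctx.done, 0, 0);
      pthread_t tid;
      pthread_create(&tid, NULL, worker, &ctx);  /* "submit the async job" */
      sem_wait(&ctx.done);                       /* like tsem_wait(&schdRspSem) */
      pthread_join(tid, NULL);
      printf("job finished, code=%d\n", ctx.code);
      sem_destroy(&ctx.done);
      return 0;
    }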
= tbInfo->tversion}; taosArrayPush(pArray, &tbSver); } } @@ -506,12 +552,12 @@ int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest) { int32_t removeMeta(STscObj* pTscObj, SArray* tbList) { SCatalog* pCatalog = NULL; - int32_t tbNum = taosArrayGetSize(tbList); - int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog); + int32_t tbNum = taosArrayGetSize(tbList); + int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog); if (code != TSDB_CODE_SUCCESS) { return code; } - + for (int32_t i = 0; i < tbNum; ++i) { SName* pTbName = taosArrayGet(tbList, i); catalogRemoveTableMeta(pCatalog, pTbName); @@ -520,7 +566,6 @@ int32_t removeMeta(STscObj* pTscObj, SArray* tbList) { return TSDB_CODE_SUCCESS; } - SRequestObj* execQuery(STscObj* pTscObj, const char* sql, int sqlLen) { SRequestObj* pRequest = NULL; int32_t retryNum = 0; @@ -543,7 +588,7 @@ SRequestObj* execQuery(STscObj* pTscObj, const char* sql, int sqlLen) { if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type)) { removeMeta(pTscObj, pRequest->tableList); } - + return pRequest; } @@ -684,28 +729,55 @@ static void destroySendMsgInfo(SMsgSendInfo* pMsgBody) { taosMemoryFreeClear(pMsgBody); } -bool persistConnForSpecificMsg(void* parenct, tmsg_t msgType) { - return msgType == TDMT_VND_QUERY_RSP || msgType == TDMT_VND_FETCH_RSP || msgType == TDMT_VND_RES_READY_RSP || - msgType == TDMT_VND_QUERY_HEARTBEAT_RSP; +void updateTargetEpSet(SMsgSendInfo* pSendInfo, STscObj* pTscObj, SRpcMsg* pMsg, SEpSet* pEpSet) { + if (NULL == pEpSet) { + return; + } + + switch (pSendInfo->target.type) { + case TARGET_TYPE_MNODE: + if (NULL == pTscObj) { + tscError("mnode epset changed but not able to update it, reqObjRefId:%" PRIx64, pSendInfo->requestObjRefId); + return; + } + + updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, pEpSet); + break; + case TARGET_TYPE_VNODE: { + if (NULL == pTscObj) { + tscError("vnode epset changed but not able to update it, reqObjRefId:%" PRIx64, pSendInfo->requestObjRefId); + return; + } + + SCatalog* pCatalog = NULL; + int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog); + if (code != TSDB_CODE_SUCCESS) { + tscError("fail to get catalog handle, clusterId:%" PRIx64 ", error %s", pTscObj->pAppInfo->clusterId, tstrerror(code)); + return; + } + + catalogUpdateVgEpSet(pCatalog, pSendInfo->target.dbFName, pSendInfo->target.vgId, pEpSet); + break; + } + default: + tscDebug("epset changed, not updated, msgType %s", TMSG_INFO(pMsg->msgType)); + break; + } } + void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle; assert(pMsg->info.ahandle != NULL); + SRequestObj* pRequest = NULL; + STscObj* pTscObj = NULL; if (pSendInfo->requestObjRefId != 0) { SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId); assert(pRequest->self == pSendInfo->requestObjRefId); pRequest->metric.rsp = taosGetTimestampUs(); - - STscObj* pTscObj = pRequest->pTscObj; - if (pEpSet) { - if (!isEpsetEqual(&pTscObj->pAppInfo->mgmtEp.epSet, pEpSet)) { - updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, pEpSet); - } - } - + pTscObj = pRequest->pTscObj; /* * There is not response callback function for submit response. * The actual inserted number of points is the first number. 
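updateTargetEpSet() above routes a piggybacked endpoint-set change by the kind of node the request targeted: an mnode redirect refreshes the shared management epset, a vnode redirect goes through the catalog keyed by database name and vgroup id, and anything else is logged and dropped. A compact sketch of that dispatch with stand-in types (the real SMsgSendInfo and SEpSet definitions differ):

    #include <stdio.h>

    typedef enum { TARGET_TYPE_MNODE, TARGET_TYPE_VNODE, TARGET_TYPE_OTHER } ETargetType;

    typedef struct {
      ETargetType type;
      int         vgId;    /* meaningful only for vnode targets */
      const char *dbFName; /* locates the vgroup cache entry */
    } STarget;

    static void applyRedirect(const STarget *tgt) {
      switch (tgt->type) {
        case TARGET_TYPE_MNODE:
          printf("update shared mnode epset\n");           /* updateEpSet_s(...) */
          break;
        case TARGET_TYPE_VNODE:
          printf("update catalog epset for vg %d of %s\n", /* catalogUpdateVgEpSet(...) */
                 tgt->vgId, tgt->dbFName);
          break;
        default:
          printf("redirect ignored for this target type\n");
          break;
      }
    }

    int main(void) {
      STarget t = {TARGET_TYPE_VNODE, 4, "1.test_db"};
      applyRedirect(&t);
      return 0;
    }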
@@ -722,6 +794,8 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { taosReleaseRef(clientReqRefPool, pSendInfo->requestObjRefId); } + updateTargetEpSet(pSendInfo, pTscObj, pMsg, pEpSet); + SDataBuf buf = {.len = pMsg->contLen, .pData = NULL, .handle = pMsg->info.handle}; if (pMsg->contLen > 0) { @@ -796,7 +870,58 @@ void doSetOneRowPtr(SReqResultInfo* pResultInfo) { } } +void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) { + assert(pRequest != NULL); + + SReqResultInfo* pResultInfo = &pRequest->body.resInfo; + if (pResultInfo->pData == NULL || pResultInfo->current >= pResultInfo->numOfRows) { + // All data has returned to App already, no need to try again + if (pResultInfo->completed) { + pResultInfo->numOfRows = 0; + return NULL; + } + + tsem_init(&schdRspSem, 0, 0); + + SReqResultInfo* pResInfo = &pRequest->body.resInfo; + SSchdFetchParam param = {.pData = (void**)&pResInfo->pData, .code = &pRequest->code}; + pRequest->code = schedulerAsyncFetchRows(pRequest->body.queryJob, schdFetchCallback, ¶m); + if (pRequest->code != TSDB_CODE_SUCCESS) { + pResultInfo->numOfRows = 0; + return NULL; + } + + tsem_wait(&schdRspSem); + if (pRequest->code != TSDB_CODE_SUCCESS) { + pResultInfo->numOfRows = 0; + return NULL; + } + + pRequest->code = setQueryResultFromRsp(&pRequest->body.resInfo, (SRetrieveTableRsp*)pResInfo->pData, convertUcs4); + if (pRequest->code != TSDB_CODE_SUCCESS) { + pResultInfo->numOfRows = 0; + return NULL; + } + + tscDebug("0x%" PRIx64 " fetch results, numOfRows:%d total Rows:%" PRId64 ", complete:%d, reqId:0x%" PRIx64, + pRequest->self, pResInfo->numOfRows, pResInfo->totalRows, pResInfo->completed, pRequest->requestId); + + if (pResultInfo->numOfRows == 0) { + return NULL; + } + } + + if (setupOneRowPtr) { + doSetOneRowPtr(pResultInfo); + pResultInfo->current += 1; + } + + return pResultInfo->row; +} + + void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) { + //return doAsyncFetchRows(pRequest, setupOneRowPtr, convertUcs4); assert(pRequest != NULL); SReqResultInfo* pResultInfo = &pRequest->body.resInfo; @@ -866,8 +991,7 @@ static char* parseTagDatatoJson(void* p) { if (j == 0) { if (*val == TSDB_DATA_TYPE_NULL) { string = taosMemoryCalloc(1, 8); - sprintf(varDataVal(string), "%s", TSDB_DATA_NULL_STR_L); - varDataSetLen(string, strlen(varDataVal(string))); + sprintf(string, "%s", TSDB_DATA_NULL_STR_L); goto end; } continue; @@ -1003,7 +1127,7 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int length = 0; } varDataSetLen(dst, length + CHAR_BYTES * 2); - *(char*)(varDataVal(dst), length + CHAR_BYTES) = '\"'; + *(char*)POINTER_SHIFT(varDataVal(dst), length + CHAR_BYTES) = '\"'; } else if (jsonInnerType == TSDB_DATA_TYPE_DOUBLE) { double jsonVd = *(double*)(jsonInnerData); sprintf(varDataVal(dst), "%.9lf", jsonVd); @@ -1125,6 +1249,8 @@ void resetConnectDB(STscObj* pTscObj) { int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4) { assert(pResultInfo != NULL && pRsp != NULL); + taosMemoryFreeClear(pResultInfo->pRspMsg); + pResultInfo->pRspMsg = (const char*)pRsp; pResultInfo->pData = (void*)pRsp->data; pResultInfo->numOfRows = htonl(pRsp->numOfRows); diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index eec8cab7ad65569f67c63ad5963333550c794ef9..53eb443b36b05393b22667a6f623892008f14ebb 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -565,10 +565,32 @@ 
const char *taos_get_server_info(TAOS *taos) { void taos_query_a(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param) { if (taos == NULL || sql == NULL) { - // todo directly call fp + fp(param, NULL, TSDB_CODE_INVALID_PARA); + return; + } + + SRequestObj* pRequest = NULL; + int32_t retryNum = 0; + int32_t code = 0; + + size_t sqlLen = strlen(sql); + + while (retryNum++ < REQUEST_MAX_TRY_TIMES) { + pRequest = launchQuery(taos, sql, sqlLen); + if (pRequest == NULL || TSDB_CODE_SUCCESS == pRequest->code || !NEED_CLIENT_HANDLE_ERROR(pRequest->code)) { + break; + } + + code = refreshMeta(taos, pRequest); + if (code) { + pRequest->code = code; + break; + } + + destroyRequest(pRequest); } - taos_query_l(taos, sql, (int32_t)strlen(sql)); + fp(param, pRequest, code); } void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) { diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c index 11c6971e3d250e2f07f3ec48c0e6ef4bdf339a15..f15315fe6055127f13b15849f897d8edda5a381b 100644 --- a/source/client/src/clientMsgHandler.c +++ b/source/client/src/clientMsgHandler.c @@ -58,7 +58,12 @@ int32_t processConnectRsp(void* param, const SDataBuf* pMsg, int32_t code) { return code; } - if (connectRsp.dnodeNum > 1 && !isEpsetEqual(&pTscObj->pAppInfo->mgmtEp.epSet, &connectRsp.epSet)) { + if (connectRsp.dnodeNum == 1) { + SEpSet srcEpSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp); + SEpSet dstEpSet = connectRsp.epSet; + rpcSetDefaultAddr(pTscObj->pAppInfo->pTransporter, srcEpSet.eps[srcEpSet.inUse].fqdn, + dstEpSet.eps[dstEpSet.inUse].fqdn); + } else if (connectRsp.dnodeNum > 1 && !isEpsetEqual(&pTscObj->pAppInfo->mgmtEp.epSet, &connectRsp.epSet)) { updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, &connectRsp.epSet); } @@ -125,9 +130,10 @@ int32_t processUseDbRsp(void* param, const SDataBuf* pMsg, int32_t code) { struct SCatalog* pCatalog = NULL; if (usedbRsp.vgVersion >= 0) { - int32_t code1 = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog); + uint64_t clusterId = pRequest->pTscObj->pAppInfo->clusterId; + int32_t code1 = catalogGetHandle(clusterId, &pCatalog); if (code1 != TSDB_CODE_SUCCESS) { - tscWarn("catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", pRequest->pTscObj->pAppInfo->clusterId, + tscWarn("0x%" PRIx64 "catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", pRequest->requestId, clusterId, tstrerror(code1)); } else { catalogRemoveDB(pCatalog, usedbRsp.db, usedbRsp.uid); @@ -158,7 +164,7 @@ int32_t processUseDbRsp(void* param, const SDataBuf* pMsg, int32_t code) { if (output.dbVgroup) taosHashCleanup(output.dbVgroup->vgHash); taosMemoryFreeClear(output.dbVgroup); - tscError("failed to build use db output since %s", terrstr()); + tscError("0x%" PRIx64 " failed to build use db output since %s", pRequest->requestId, terrstr()); } else if (output.dbVgroup) { struct SCatalog* pCatalog = NULL; diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 68c47c2d13421cd34e327db37e31ae76774985ac..7d623072d664a4b9f1d77251812032f8c4fa4de1 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -24,7 +24,6 @@ #define EQUAL '=' #define QUOTE '"' #define SLASH '\\' -#define tsMaxSQLStringLen (1024*1024) #define JUMP_SPACE(sql) while (*sql != '\0'){if(*sql == SPACE) sql++;else break;} // comma , @@ -63,12 +62,11 @@ for (int i = 1; i < keyLen; ++i) { \ #define TS "_ts" #define TS_LEN 3 -#define VALUE "value" -#define VALUE_LEN 5 +#define VALUE "_value" +#define VALUE_LEN 6 #define 
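The reworked taos_query_a() above reports a NULL taos/sql straight through the callback and otherwise adopts the synchronous path's retry contract: run the query, and if the failure is one the client can heal by refreshing cached metadata, refresh and try again up to REQUEST_MAX_TRY_TIMES. A minimal sketch of that loop with stand-in error codes and helpers:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_TRY 3

    static bool retriable(int code) { return code == -1; }  /* stale-meta stand-in */
    static int  runOnce(int attempt) { return attempt == 0 ? -1 : 0; }  /* fails once */

    int main(void) {
      int code = 0;
      for (int attempt = 0; attempt < MAX_TRY; ++attempt) {
        code = runOnce(attempt);
        if (code == 0 || !retriable(code)) break;  /* success or a fatal error */
        printf("attempt %d hit stale metadata, refreshing cache...\n", attempt);
        /* a refreshMeta()-style call would run here before retrying */
      }
      printf("final code: %d\n", code);
      return 0;
    }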
BINARY_ADD_LEN 2 // "binary" 2 means " " #define NCHAR_ADD_LEN 3 // L"nchar" 3 means L" " -#define CHAR_SAVE_LENGTH 8 //================================================================================================= typedef TSDB_SML_PROTOCOL_TYPE SMLProtocolType; @@ -253,12 +251,20 @@ static int32_t smlGenerateSchemaAction(SSchema* colField, SHashObj* colHash, SSm return 0; } +static int32_t smlFindNearestPowerOf2(int32_t length){ + int32_t result = 1; + while(result <= length){ + result *= 2; + } + return result; +} + static int32_t smlBuildColumnDescription(SSmlKv* field, char* buf, int32_t bufSize, int32_t* outBytes) { uint8_t type = field->type; char tname[TSDB_TABLE_NAME_LEN] = {0}; memcpy(tname, field->key, field->keyLen); if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { - int32_t bytes = field->length > CHAR_SAVE_LENGTH ? (2*field->length) : CHAR_SAVE_LENGTH; + int32_t bytes = smlFindNearestPowerOf2(field->length); int out = snprintf(buf, bufSize, "`%s` %s(%d)", tname, tDataTypes[field->type].name, bytes); *outBytes = out; @@ -273,8 +279,8 @@ static int32_t smlBuildColumnDescription(SSmlKv* field, char* buf, int32_t bufSi static int32_t smlApplySchemaAction(SSmlHandle* info, SSchemaAction* action) { int32_t code = 0; int32_t outBytes = 0; - char *result = (char *)taosMemoryCalloc(1, tsMaxSQLStringLen+1); - int32_t capacity = tsMaxSQLStringLen + 1; + char *result = (char *)taosMemoryCalloc(1, TSDB_MAX_ALLOWED_SQL_LEN); + int32_t capacity = TSDB_MAX_ALLOWED_SQL_LEN; uDebug("SML:0x%"PRIx64" apply schema action. action: %d", info->id, action->action); switch (action->action) { @@ -398,7 +404,7 @@ static int32_t smlApplySchemaAction(SSmlHandle* info, SSchemaAction* action) { } if(taosArrayGetSize(cols) == 0){ outBytes = snprintf(pos, freeBytes,"`%s` %s(%d)", - tsSmlTagName, tDataTypes[TSDB_DATA_TYPE_NCHAR].name, CHAR_SAVE_LENGTH); + tsSmlTagName, tDataTypes[TSDB_DATA_TYPE_NCHAR].name, 1); pos += outBytes; freeBytes -= outBytes; *pos = ','; ++pos; --freeBytes; } @@ -508,6 +514,11 @@ static int32_t smlModifyDBSchemas(SSmlHandle* info) { if (code != TSDB_CODE_SUCCESS) { return code; } + + code = catalogRefreshTableMeta(info->pCatalog, info->taos->pAppInfo->pTransporter, &ep, &pName, -1); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } else { uError("SML:0x%"PRIx64" load table meta error: %s", info->id, tstrerror(code)); return code; diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c index 17468584822361d8a3f7ff048cb797a8174b1836..01d785ef73107778c818437c18d98c778d1f8893 100644 --- a/source/client/src/clientStmt.c +++ b/source/client/src/clientStmt.c @@ -48,7 +48,8 @@ int32_t stmtSwitchStatus(STscStmt* pStmt, STMT_STATUS newStatus) { break; case STMT_EXECUTE: if (STMT_TYPE_QUERY == pStmt->sql.type) { - if (STMT_STATUS_NE(ADD_BATCH) && STMT_STATUS_NE(FETCH_FIELDS) && STMT_STATUS_NE(BIND) && STMT_STATUS_NE(BIND_COL)) { + if (STMT_STATUS_NE(ADD_BATCH) && STMT_STATUS_NE(FETCH_FIELDS) && STMT_STATUS_NE(BIND) && + STMT_STATUS_NE(BIND_COL)) { code = TSDB_CODE_TSC_STMT_API_ERROR; } } else { @@ -230,22 +231,6 @@ int32_t stmtParseSql(STscStmt* pStmt) { pStmt->sql.type = STMT_TYPE_QUERY; } -/* - switch (nodeType(pStmt->sql.pQuery->pRoot)) { - case QUERY_NODE_VNODE_MODIF_STMT: - if (0 == pStmt->sql.type) { - pStmt->sql.type = STMT_TYPE_INSERT; - } - break; - case QUERY_NODE_SELECT_STMT: - pStmt->sql.type = STMT_TYPE_QUERY; - break; - default: - tscError("not supported stmt type %d", nodeType(pStmt->sql.pQuery->pRoot)); - 
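smlFindNearestPowerOf2() above sizes binary/nchar columns to the smallest power of two strictly greater than the incoming length, replacing the old fixed-or-doubled rule. Note the <= in the loop: an exact power of two is still bumped one step, so a length of 8 yields a 16-byte column. A standalone copy for a quick check:

    #include <stdio.h>

    static int nearestPowerOf2(int length) {
      int result = 1;
      while (result <= length) result *= 2;  /* strictly greater than length */
      return result;
    }

    int main(void) {
      printf("%d %d %d\n", nearestPowerOf2(5), nearestPowerOf2(8), nearestPowerOf2(100));
      /* prints: 8 16 128 */
      return 0;
    }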
STMT_ERR_RET(TSDB_CODE_TSC_STMT_CLAUSE_ERROR); - } -*/ - return TSDB_CODE_SUCCESS; } @@ -823,7 +808,7 @@ _return: code = stmtUpdateTableUid(pStmt, pRsp); } } - + tFreeSSubmitRsp(pRsp); ++pStmt->sql.runTimes; @@ -861,7 +846,7 @@ int stmtIsInsert(TAOS_STMT* stmt, int* insert) { if (pStmt->sql.type) { *insert = (STMT_TYPE_INSERT == pStmt->sql.type || STMT_TYPE_MULTI_INSERT == pStmt->sql.type); } else { - *insert = isInsertSql(pStmt->sql.sqlStr, 0); + *insert = qIsInsertSql(pStmt->sql.sqlStr, 0); } return TSDB_CODE_SUCCESS; diff --git a/source/client/test/clientTests.cpp b/source/client/test/clientTests.cpp index d67a361c21777e0dd164f4cdf89bd90145968bf8..914e5aefc2e16595e3c8831f4255bdb26c4738a9 100644 --- a/source/client/test/clientTests.cpp +++ b/source/client/test/clientTests.cpp @@ -606,7 +606,7 @@ TEST(testCase, projection_query_tables) { } taos_free_result(pRes); - for(int32_t i = 0; i < 100000; i += 20) { + for(int32_t i = 0; i < 1000000; i += 20) { char sql[1024] = {0}; sprintf(sql, "insert into tu values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)" @@ -626,23 +626,23 @@ TEST(testCase, projection_query_tables) { printf("start to insert next table\n"); - for(int32_t i = 0; i < 100000; i += 20) { - char sql[1024] = {0}; - sprintf(sql, - "insert into tu2 values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)" - "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)" - "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)" - "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)", - i, i, i + 1, i + 1, i + 2, i + 2, i + 3, i + 3, i + 4, i + 4, i + 5, i + 5, i + 6, i + 6, i + 7, i + 7, - i + 8, i + 8, i + 9, i + 9, i + 10, i + 10, i + 11, i + 11, i + 12, i + 12, i + 13, i + 13, i + 14, i + 14, - i + 15, i + 15, i + 16, i + 16, i + 17, i + 17, i + 18, i + 18, i + 19, i + 19); - TAOS_RES* p = taos_query(pConn, sql); - if (taos_errno(p) != 0) { - printf("failed to insert data, reason:%s\n", taos_errstr(p)); - } - - taos_free_result(p); - } +// for(int32_t i = 0; i < 1000000; i += 20) { +// char sql[1024] = {0}; +// sprintf(sql, +// "insert into tu2 values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)" +// "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)" +// "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)" +// "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)", +// i, i, i + 1, i + 1, i + 2, i + 2, i + 3, i + 3, i + 4, i + 4, i + 5, i + 5, i + 6, i + 6, i + 7, i + 7, +// i + 8, i + 8, i + 9, i + 9, i + 10, i + 10, i + 11, i + 11, i + 12, i + 12, i + 13, i + 13, i + 14, i + 14, +// i + 15, i + 15, i + 16, i + 16, i + 17, i + 17, i + 18, i + 18, i + 19, i + 19); +// TAOS_RES* p = taos_query(pConn, sql); +// if (taos_errno(p) != 0) { +// printf("failed to insert data, reason:%s\n", taos_errstr(p)); +// } +// +// taos_free_result(p); +// } // pRes = taos_query(pConn, "select * from tu"); // if (taos_errno(pRes) != 0) { @@ -664,7 +664,7 @@ TEST(testCase, projection_query_tables) { // taos_free_result(pRes); taos_close(pConn); } - +#if 0 TEST(testCase, projection_query_stables) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(pConn, nullptr); @@ -705,7 +705,7 @@ TEST(testCase, agg_query_tables) { } taos_free_result(pRes); - pRes = taos_query(pConn, "select tbname from st1"); + pRes = taos_query(pConn, "explain analyze select count(*) from tu interval(1s)"); if (taos_errno(pRes) != 0) { printf("failed to select from table, reason:%s\n", 
taos_errstr(pRes)); taos_free_result(pRes); @@ -733,5 +733,6 @@ TEST(testCase, agg_query_tables) { taos_free_result(pRes); taos_close(pConn); } +#endif #pragma GCC diagnostic pop diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 9fe7645e2b2c5dab0f2f588013269be53a6756f1..38a6bafe9a5ea2c795b82899e6a6ce91f3ad545d 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -36,7 +36,6 @@ static const SSysDbTableSchema mnodesSchema[] = { {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "role", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "role_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, }; @@ -124,13 +123,17 @@ static const SSysDbTableSchema userStbsSchema[] = { {.name = "table_comment", .bytes = 1024 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, }; -static const SSysDbTableSchema userStreamsSchema[] = { +static const SSysDbTableSchema streamSchema[] = { {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "user_name", .bytes = 23, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "dest_table", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "sql", .bytes = 1024, .type = TSDB_DATA_TYPE_VARCHAR}, -}; + {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, + {.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, + {.name = "trigger", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, + }; static const SSysDbTableSchema userTblsSchema[] = { {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, @@ -197,12 +200,14 @@ static const SSysDbTableSchema vgroupsSchema[] = { {.name = "status", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "nfiles", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, {.name = "file_size", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, + {.name = "tsma", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, }; static const SSysDbTableSchema smaSchema[] = { {.name = "sma_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, }; static const SSysDbTableSchema transSchema[] = { @@ -233,7 +238,7 @@ static const SSysTableMeta infosMeta[] = { {TSDB_INS_TABLE_USER_FUNCTIONS, userFuncSchema, tListLen(userFuncSchema)}, {TSDB_INS_TABLE_USER_INDEXES, userIdxSchema, tListLen(userIdxSchema)}, {TSDB_INS_TABLE_USER_STABLES, userStbsSchema, tListLen(userStbsSchema)}, - {TSDB_INS_TABLE_USER_STREAMS, userStreamsSchema, tListLen(userStreamsSchema)}, + {TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema)}, {TSDB_INS_TABLE_USER_TABLES, userTblsSchema, 
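The system-table changes above all follow one registration pattern: a static {name, bytes, type} column array paired with an element-count macro (tListLen in the real code) when the table is added to infosMeta or perfsMeta. A reduced stand-in of the pattern; the column set and type codes here are abbreviated, not the full stream schema:

    #include <stdio.h>

    typedef struct { const char *name; int bytes; int type; } SColDesc;
    #define LIST_LEN(a) (sizeof(a) / sizeof((a)[0]))  /* like tListLen */

    static const SColDesc streamCols[] = {
        {"stream_name", 64 + 2, 8}, /* varchar plus a 2-byte length header */
        {"create_time", 8, 9},      /* timestamp */
        {"watermark", 8, 5},        /* bigint */
        {"trigger", 4, 4},          /* int */
    };

    int main(void) {
      printf("stream table registers %zu columns\n", LIST_LEN(streamCols));
      return 0;
    }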
tListLen(userTblsSchema)}, {TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED, userTblDistSchema, tListLen(userTblDistSchema)}, {TSDB_INS_TABLE_USER_USERS, userUsersSchema, tListLen(userUsersSchema)}, @@ -306,17 +311,7 @@ static const SSysDbTableSchema querySchema[] = { {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, }; -static const SSysDbTableSchema streamSchema[] = { - {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "trigger", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, -}; + static const SSysTableMeta perfsMeta[] = { {TSDB_PERFS_TABLE_CONNECTIONS, connectionsSchema, tListLen(connectionsSchema)}, diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 6e1a9c5726b65291390007515e2cc7b8c1d86dd1..d1d5a9fcb2bbe1e748d07215e010e9ed259e3377 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -275,8 +275,10 @@ int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, uint32_t numOfRow1, in doBitmapMerge(pColumnInfoData, numOfRow1, pSource, numOfRow2); - int32_t offset = pColumnInfoData->info.bytes * numOfRow1; - memcpy(pColumnInfoData->pData + offset, pSource->pData, pSource->info.bytes * numOfRow2); + if (pSource->pData) { + int32_t offset = pColumnInfoData->info.bytes * numOfRow1; + memcpy(pColumnInfoData->pData + offset, pSource->pData, pSource->info.bytes * numOfRow2); + } } return numOfRow1 + numOfRow2; @@ -319,14 +321,16 @@ int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* p pColumnInfoData->nullbitmap = tmp; memcpy(pColumnInfoData->nullbitmap, pSource->nullbitmap, BitmapLen(numOfRows)); - int32_t newSize = numOfRows * pColumnInfoData->info.bytes; - tmp = taosMemoryRealloc(pColumnInfoData->pData, newSize); - if (tmp == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; - } + if (pSource->pData) { + int32_t newSize = numOfRows * pColumnInfoData->info.bytes; + tmp = taosMemoryRealloc(pColumnInfoData->pData, newSize); + if (tmp == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } - pColumnInfoData->pData = tmp; - memcpy(pColumnInfoData->pData, pSource->pData, pSource->info.bytes * numOfRows); + pColumnInfoData->pData = tmp; + memcpy(pColumnInfoData->pData, pSource->pData, pSource->info.bytes * numOfRows); + } } pColumnInfoData->hasNull = pSource->hasNull; @@ -350,30 +354,29 @@ int32_t blockDataUpdateTsWindow(SSDataBlock* pDataBlock, int32_t tsColumnIndex) return -1; } - int32_t index = (tsColumnIndex == -1)? 0:tsColumnIndex; + int32_t index = (tsColumnIndex == -1) ? 
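The guards added to colDataMergeCol() and colDataAssign() above skip reallocation and copying when the source column never materialized a data buffer (for instance, an all-NULL column), instead of passing a NULL pData to memcpy. The shape of the guard, reduced to a stand-in helper:

    #include <stdio.h>
    #include <string.h>

    /* Copy fixed-width column data only when the source buffer exists. */
    static void mergeCol(char *dst, const char *src, size_t bytes, size_t rows) {
      if (src == NULL) return;  /* nothing materialized, nothing to copy */
      memcpy(dst, src, bytes * rows);
    }

    int main(void) {
      char dst[16] = {0};
      mergeCol(dst, NULL, 4, 4);  /* safe no-op instead of undefined behavior */
      printf("ok\n");
      return 0;
    }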
0 : tsColumnIndex; + SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, index); if (pColInfoData->info.type != TSDB_DATA_TYPE_TIMESTAMP) { return 0; } - pDataBlock->info.window.skey = *(TSKEY*)colDataGetData(pColInfoData, 0); - pDataBlock->info.window.ekey = *(TSKEY*)colDataGetData(pColInfoData, (pDataBlock->info.rows - 1)); + TSKEY skey = *(TSKEY*)colDataGetData(pColInfoData, 0); + TSKEY ekey = *(TSKEY*)colDataGetData(pColInfoData, (pDataBlock->info.rows - 1)); + + pDataBlock->info.window.skey = TMIN(skey, ekey); + pDataBlock->info.window.ekey = TMAX(skey, ekey); + return 0; } -// if pIndexMap = NULL, merger one column by on column -int32_t blockDataMerge(SSDataBlock* pDest, const SSDataBlock* pSrc, SArray* pIndexMap) { +int32_t blockDataMerge(SSDataBlock* pDest, const SSDataBlock* pSrc) { assert(pSrc != NULL && pDest != NULL); int32_t capacity = pDest->info.capacity; for (int32_t i = 0; i < pDest->info.numOfCols; ++i) { - int32_t mapIndex = i; - // if (pIndexMap) { - // mapIndex = *(int32_t*)taosArrayGet(pIndexMap, i); - // } - SColumnInfoData* pCol2 = taosArrayGet(pDest->pDataBlock, i); - SColumnInfoData* pCol1 = taosArrayGet(pSrc->pDataBlock, mapIndex); + SColumnInfoData* pCol1 = taosArrayGet(pSrc->pDataBlock, i); capacity = pDest->info.capacity; colDataMergeCol(pCol2, pDest->info.rows, &capacity, pCol1, pSrc->info.rows); @@ -605,14 +608,15 @@ int32_t blockDataFromBuf(SSDataBlock* pBlock, const char* buf) { } int32_t blockDataFromBuf1(SSDataBlock* pBlock, const char* buf, size_t capacity) { - pBlock->info.rows = *(int32_t*) buf; - pBlock->info.groupId = *(uint64_t*) (buf + sizeof(int32_t)); + pBlock->info.rows = *(int32_t*)buf; + pBlock->info.groupId = *(uint64_t*)(buf + sizeof(int32_t)); int32_t numOfCols = pBlock->info.numOfCols; const char* pStart = buf + sizeof(uint32_t) + sizeof(uint64_t); for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, i); + pCol->hasNull = true; if (IS_VAR_DATA_TYPE(pCol->info.type)) { size_t metaSize = capacity * sizeof(int32_t); @@ -675,7 +679,7 @@ size_t blockDataGetSerialMetaSize(const SSDataBlock* pBlock) { return sizeof(int32_t) + sizeof(uint64_t) + pBlock->info.numOfCols * sizeof(int32_t); } -double blockDataGetSerialRowSize(const SSDataBlock* pBlock) { +double blockDataGetSerialRowSize(const SSDataBlock* pBlock) { ASSERT(pBlock != NULL); double rowSize = 0; @@ -1155,7 +1159,9 @@ void colInfoDataCleanup(SColumnInfoData* pColumn, uint32_t numOfRows) { if (IS_VAR_DATA_TYPE(pColumn->info.type)) { pColumn->varmeta.length = 0; } else { - memset(pColumn->nullbitmap, 0, BitmapLen(numOfRows)); + if (pColumn->nullbitmap != NULL) { + memset(pColumn->nullbitmap, 0, BitmapLen(numOfRows)); + } } } @@ -1238,7 +1244,7 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize) { // the true value must be less than the value of nRows int32_t additional = 0; - for(int32_t i = 0; i < pBlock->info.numOfCols; ++i) { + for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) { SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, i); if (IS_VAR_DATA_TYPE(pCol->info.type)) { additional += nRows * sizeof(int32_t); @@ -1248,7 +1254,7 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize) { } int32_t newRows = (payloadSize - additional) / rowSize; - ASSERT(newRows <= nRows && newRows > 1); + ASSERT(newRows <= nRows && newRows >= 1); return newRows; } @@ -1292,8 +1298,8 @@ static void doShiftBitmap(char* nullBitmap, size_t n, size_t total) { static void 
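blockDataUpdateTsWindow() above stops assuming row 0 holds the earliest timestamp: for a descending-ordered block the old assignment produced an inverted window, so the fix takes the min and max of the first and last rows. An illustration with stand-in types:

    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t TSKEY;
    #define TMIN(a, b) ((a) < (b) ? (a) : (b))
    #define TMAX(a, b) ((a) > (b) ? (a) : (b))

    int main(void) {
      TSKEY first = 1700000005000, last = 1700000001000;  /* descending block */
      TSKEY skey = TMIN(first, last), ekey = TMAX(first, last);
      printf("window [%lld, %lld]\n", (long long)skey, (long long)ekey);  /* ordered */
      return 0;
    }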
colDataTrimFirstNRows(SColumnInfoData* pColInfoData, size_t n, size_t total) { if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) { - memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[n], (total - n)); - memset(&pColInfoData->varmeta.offset[total - n - 1], 0, n); + memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[n], (total - n) * sizeof(int32_t)); + memset(&pColInfoData->varmeta.offset[total - n], 0, n); } else { int32_t bytes = pColInfoData->info.bytes; memmove(pColInfoData->pData, ((char*)pColInfoData->pData + n * bytes), (total - n) * bytes); @@ -1462,7 +1468,7 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) { } void blockDebugShowData(const SArray* dataBlocks) { - char pBuf[128]; + char pBuf[128] = {0}; int32_t sz = taosArrayGetSize(dataBlocks); for (int32_t i = 0; i < sz; i++) { SSDataBlock* pDataBlock = taosArrayGet(dataBlocks, i); @@ -1510,14 +1516,11 @@ void blockDebugShowData(const SArray* dataBlocks) { * @param pReq * @param pDataBlocks * @param vgId - * @param uid set as parameter temporarily // TODO: remove this parameter, and the executor should set uid in - * SDataBlock->info.uid * @param suid // TODO: check with Liao whether suid response is reasonable * * TODO: colId should be set */ -int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId, - tb_uid_t uid, tb_uid_t suid) { +int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId, tb_uid_t suid) { int32_t sz = taosArrayGetSize(pDataBlocks); int32_t bufSize = sizeof(SSubmitReq); for (int32_t i = 0; i < sz; ++i) { @@ -1538,7 +1541,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks int32_t msgLen = sizeof(SSubmitReq); int32_t numOfBlks = 0; SRowBuilder rb = {0}; - tdSRowInit(&rb, 0); // TODO: use the latest version + tdSRowInit(&rb, pTSchema->version); // TODO: use the latest version for (int32_t i = 0; i < sz; ++i) { SSDataBlock* pDataBlock = taosArrayGet(pDataBlocks, i); @@ -1553,7 +1556,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks SSubmitBlk* pSubmitBlk = POINTER_SHIFT(pDataBuf, msgLen); pSubmitBlk->suid = suid; - pSubmitBlk->uid = uid; + pSubmitBlk->uid = pDataBlock->info.groupId; pSubmitBlk->numOfRows = rows; ++numOfBlks; @@ -1564,6 +1567,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks tdSRowResetBuf(&rb, POINTER_SHIFT(pDataBuf, msgLen)); // set row buf printf("|"); bool isStartKey = false; + int32_t offset = 0; for (int32_t k = 0; k < colNum; ++k) { // iterate by column SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k); void* var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes); @@ -1572,18 +1576,18 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks if (!isStartKey) { isStartKey = true; tdAppendColValToRow(&rb, PRIMARYKEY_TIMESTAMP_COL_ID, TSDB_DATA_TYPE_TIMESTAMP, TD_VTYPE_NORM, var, true, - 0, 0); + offset, k); + } else { - tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_TIMESTAMP, TD_VTYPE_NORM, var, true, 8, k); - break; + tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_TIMESTAMP, TD_VTYPE_NORM, var, true, offset, k); } break; case TSDB_DATA_TYPE_NCHAR: { - tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_NCHAR, TD_VTYPE_NORM, var, true, 8, k); + tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_NCHAR, TD_VTYPE_NORM, var, true, offset, k); break; } case TSDB_DATA_TYPE_VARCHAR: { // 
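The colDataTrimFirstNRows() fix above corrects the byte math for variable-length columns: varmeta.offset is an int32_t array, so shifting (total - n) entries takes (total - n) * sizeof(int32_t) bytes, and the vacated region starts at index total - n. A standalone demonstration (the memset width below clears whole entries, a slight widening of what the patch itself does):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
      int32_t offset[6] = {0, 10, 25, 40, 70, 90};
      size_t  n = 2, total = 6;  /* drop the first two rows */
      memmove(offset, &offset[n], (total - n) * sizeof(int32_t));  /* the fix */
      memset(&offset[total - n], 0, n * sizeof(int32_t));          /* clear the tail */
      for (size_t i = 0; i < total; i++) printf("%d ", (int)offset[i]);
      printf("\n");  /* prints: 25 40 70 90 0 0 */
      return 0;
    }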
TSDB_DATA_TYPE_BINARY - tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_VARCHAR, TD_VTYPE_NORM, var, true, 8, k); + tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_VARCHAR, TD_VTYPE_NORM, var, true, offset, k); break; } case TSDB_DATA_TYPE_VARBINARY: @@ -1595,13 +1599,14 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks break; default: if (pColInfoData->info.type < TSDB_DATA_TYPE_MAX && pColInfoData->info.type > TSDB_DATA_TYPE_NULL) { - tdAppendColValToRow(&rb, 2, pColInfoData->info.type, TD_VTYPE_NORM, var, true, 8, k); + tdAppendColValToRow(&rb, 2, pColInfoData->info.type, TD_VTYPE_NORM, var, true, offset, k); } else { printf("the column type %" PRIi16 " is undefined\n", pColInfoData->info.type); TASSERT(0); } break; } + offset += TYPE_BYTES[pColInfoData->info.type]; } dataLen += TD_ROW_LEN(rb.pBuf); } @@ -1632,7 +1637,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks } SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, bool createTb, int64_t suid, - int32_t vgId) { + const char* stbFullName, int32_t vgId) { SSubmitReq* ret = NULL; // cal size @@ -1648,10 +1653,12 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo if (createTb) { SVCreateTbReq createTbReq = {0}; - createTbReq.name = "a"; + char* cname = taosMemoryCalloc(1, TSDB_TABLE_FNAME_LEN); + snprintf(cname, TSDB_TABLE_FNAME_LEN, "%s:%ld", stbFullName, pDataBlock->info.groupId); + createTbReq.name = cname; createTbReq.flags = 0; createTbReq.type = TSDB_CHILD_TABLE; - createTbReq.ctb.suid = htobe64(suid); + createTbReq.ctb.suid = suid; SKVRowBuilder kvRowBuilder = {0}; if (tdInitKVRowBuilder(&kvRowBuilder) < 0) { @@ -1664,6 +1671,7 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo int32_t code; tEncodeSize(tEncodeSVCreateTbReq, &createTbReq, schemaLen, code); if (code < 0) return NULL; + taosMemoryFree(cname); } cap += sizeof(SSubmitBlk) + schemaLen + rows * maxLen; @@ -1699,7 +1707,9 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo int32_t schemaLen = 0; if (createTb) { SVCreateTbReq createTbReq = {0}; - createTbReq.name = "a"; + char* cname = taosMemoryCalloc(1, TSDB_TABLE_FNAME_LEN); + snprintf(cname, TSDB_TABLE_FNAME_LEN, "%s:%ld", stbFullName, pDataBlock->info.groupId); + createTbReq.name = cname; createTbReq.flags = 0; createTbReq.type = TSDB_CHILD_TABLE; createTbReq.ctb.suid = suid; @@ -1734,8 +1744,12 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo for (int32_t k = 0; k < pTSchema->numOfCols; k++) { const STColumn* pColumn = &pTSchema->columns[k]; SColumnInfoData* pColData = taosArrayGet(pDataBlock->pDataBlock, k); - void* data = colDataGetData(pColData, j); - tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, pColumn->offset, k); + if (colDataIsNull_s(pColData, j)) { + tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NONE, NULL, false, pColumn->offset, k); + } else { + void* data = colDataGetData(pColData, j); + tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, pColumn->offset, k); + } } int32_t rowLen = TD_ROW_LEN(rowData); rowData = POINTER_SHIFT(rowData, rowLen); @@ -1752,3 +1766,99 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo ret->length = htonl(ret->length); return ret; } + +void blockCompressEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_t numOfCols, int8_t 
needCompress) { + int32_t* actualLen = (int32_t*)data; + data += sizeof(int32_t); + + uint64_t* groupId = (uint64_t*)data; + data += sizeof(uint64_t); + + int32_t* colSizes = (int32_t*)data; + data += numOfCols * sizeof(int32_t); + + *dataLen = (numOfCols * sizeof(int32_t) + sizeof(uint64_t) + sizeof(int32_t)); + + int32_t numOfRows = pBlock->info.rows; + for (int32_t col = 0; col < numOfCols; ++col) { + SColumnInfoData* pColRes = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, col); + + // copy the null bitmap + if (IS_VAR_DATA_TYPE(pColRes->info.type)) { + size_t metaSize = numOfRows * sizeof(int32_t); + memcpy(data, pColRes->varmeta.offset, metaSize); + data += metaSize; + (*dataLen) += metaSize; + } else { + int32_t len = BitmapLen(numOfRows); + memcpy(data, pColRes->nullbitmap, len); + data += len; + (*dataLen) += len; + } + + if (needCompress) { + colSizes[col] = blockCompressColData(pColRes, numOfRows, data, needCompress); + data += colSizes[col]; + (*dataLen) += colSizes[col]; + } else { + colSizes[col] = colDataGetLength(pColRes, numOfRows); + (*dataLen) += colSizes[col]; + memmove(data, pColRes->pData, colSizes[col]); + data += colSizes[col]; + } + + colSizes[col] = htonl(colSizes[col]); + } + + *actualLen = *dataLen; + *groupId = pBlock->info.groupId; +} + +const char* blockCompressDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t numOfRows, const char* pData) { + blockDataEnsureCapacity(pBlock, numOfRows); + const char* pStart = pData; + + int32_t dataLen = *(int32_t*)pStart; + pStart += sizeof(int32_t); + + pBlock->info.groupId = *(uint64_t*)pStart; + pStart += sizeof(uint64_t); + + int32_t* colLen = (int32_t*)pStart; + pStart += sizeof(int32_t) * numOfCols; + + for (int32_t i = 0; i < numOfCols; ++i) { + colLen[i] = htonl(colLen[i]); + ASSERT(colLen[i] >= 0); + + SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i); + if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) { + pColInfoData->varmeta.length = colLen[i]; + pColInfoData->varmeta.allocLen = colLen[i]; + + memcpy(pColInfoData->varmeta.offset, pStart, sizeof(int32_t) * numOfRows); + pStart += sizeof(int32_t) * numOfRows; + + if (colLen[i] > 0) { + taosMemoryFreeClear(pColInfoData->pData); + pColInfoData->pData = taosMemoryMalloc(colLen[i]); + } + } else { + memcpy(pColInfoData->nullbitmap, pStart, BitmapLen(numOfRows)); + pStart += BitmapLen(numOfRows); + } + + if (colLen[i] > 0) { + memcpy(pColInfoData->pData, pStart, colLen[i]); + } + + // TODO + // setting this flag to true temporarily so aggregate function on stable will + // examine NULL value for non-primary key column + pColInfoData->hasNull = true; + pStart += colLen[i]; + } + + ASSERT(pStart - pData == dataLen); + return pStart; +} \ No newline at end of file diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c index fe112ba67e0c0b7751f2664f5673badeb1ea6010..e8d7e3ac0933532a4ad4f55509df575d2eaa177b 100644 --- a/source/common/src/tdataformat.c +++ b/source/common/src/tdataformat.c @@ -581,7 +581,52 @@ void tTagFree(STag *pTag) { if (pTag) taosMemoryFree(pTag); } -void tTagGet(STag *pTag, int16_t cid, int8_t type, uint8_t **ppData, int32_t *nData) { +int32_t tTagSet(STag *pTag, SSchema *pSchema, int32_t nCols, int iCol, uint8_t *pData, uint32_t nData, STag **ppTag) { + STagVal *pTagVals; + int16_t nTags = 0; + SSchema *pColumn; + uint8_t *p; + uint32_t n; + + pTagVals = (STagVal *)taosMemoryMalloc(sizeof(*pTagVals) * nCols); + if (pTagVals == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } + + for (int32_t 
i = 0; i < nCols; i++) { + pColumn = &pSchema[i]; + + if (i == iCol) { + p = pData; + n = nData; + } else { + tTagGet(pTag, pColumn->colId, pColumn->type, &p, &n); + } + + if (p == NULL) continue; + + ASSERT(IS_VAR_DATA_TYPE(pColumn->type) || n == pColumn->bytes); + + pTagVals[nTags].cid = pColumn->colId; + pTagVals[nTags].type = pColumn->type; + pTagVals[nTags].nData = n; + pTagVals[nTags].pData = p; + + nTags++; + } + + // create new tag + if (tTagNew(pTagVals, nTags, ppTag) < 0) { + taosMemoryFree(pTagVals); + return -1; + } + + taosMemoryFree(pTagVals); + return 0; +} + +void tTagGet(STag *pTag, int16_t cid, int8_t type, uint8_t **ppData, uint32_t *nData) { STagIdx *pTagIdx = bsearch(&((STagIdx){.cid = cid}), pTag->idx, pTag->nTag, sizeof(STagIdx), tTagIdxCmprFn); if (pTagIdx == NULL) { *ppData = NULL; @@ -597,18 +642,11 @@ void tTagGet(STag *pTag, int16_t cid, int8_t type, uint8_t **ppData, int32_t *nD } } -int32_t tEncodeTag(SEncoder *pEncoder, STag *pTag) { - // return tEncodeBinary(pEncoder, (uint8_t *)pTag, pTag->len); - ASSERT(0); - return 0; +int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag) { + return tEncodeBinary(pEncoder, (const uint8_t *)pTag, pTag->len); } -int32_t tDecodeTag(SDecoder *pDecoder, const STag **ppTag) { - // uint32_t n; - // return tDecodeBinary(pDecoder, (const uint8_t **)ppTag, &n); - ASSERT(0); - return 0; -} +int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag) { return tDecodeBinary(pDecoder, (uint8_t **)ppTag, NULL); } #if 1 // =================================================================================================================== static void dataColSetNEleNull(SDataCol *pCol, int nEle); @@ -855,7 +893,7 @@ SDataCols *tdNewDataCols(int maxCols, int maxRows) { pCols->maxCols = maxCols; pCols->numOfRows = 0; pCols->numOfCols = 0; - // pCols->bitmapMode = 0; // calloc already set 0 + pCols->bitmapMode = TSDB_BITMODE_DEFAULT; if (maxCols > 0) { pCols->cols = (SDataCol *)taosMemoryCalloc(maxCols, sizeof(SDataCol)); @@ -899,7 +937,7 @@ int tdInitDataCols(SDataCols *pCols, STSchema *pSchema) { #endif pCols->numOfRows = 0; - pCols->bitmapMode = 0; + pCols->bitmapMode = TSDB_BITMODE_DEFAULT; pCols->numOfCols = schemaNCols(pSchema); for (i = 0; i < schemaNCols(pSchema); ++i) { @@ -1087,7 +1125,7 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder) { kvRowSetNCols(row, pBuilder->nCols); kvRowSetLen(row, tlen); - if(pBuilder->nCols > 0){ + if (pBuilder->nCols > 0) { memcpy(kvRowColIdx(row), pBuilder->pColIdx, sizeof(SColIdx) * pBuilder->nCols); memcpy(kvRowValues(row), pBuilder->buf, pBuilder->size); } diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 1b61a0bc606aa9fd479cf996668756d2b88f4702..141ec4f03b76238d6c15695c7ea3a8ea112d9e4b 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -79,9 +79,10 @@ uint16_t tsTelemPort = 80; // schemaless char tsSmlTagName[TSDB_COL_NAME_LEN] = "_tag_null"; -char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; //user defined child table name can be specified in tag value. - //If set to empty system will generate table name using MD5 hash. -bool tsSmlDataFormat = true; // true means that the name and order of cols in each line are the same(only for influx protocol) +char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; // user defined child table name can be specified in tag value. + // If set to empty system will generate table name using MD5 hash. 
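
The tTagSet hunk above rewrites a tag tuple by collecting every column's current value with tTagGet, substituting only the column being altered, and rebuilding the whole tag with tTagNew. Below is a minimal standalone sketch of that copy-and-rebuild pattern; TagVal and tagSet are hypothetical stand-ins for illustration, not the real STag/SSchema API.

/* Sketch of the tTagSet pattern: rebuild the full value list with one slot
 * replaced, leaving the original untouched. Hypothetical types only. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int16_t cid;     /* column id */
  char    val[16]; /* fixed-size payload, enough for this sketch */
} TagVal;

/* Rebuild the tag list with the value of column `cid` replaced by `newVal`;
 * every other column is copied through unchanged. Caller frees the result. */
static TagVal *tagSet(const TagVal *old, int n, int16_t cid, const char *newVal) {
  TagVal *out = malloc(sizeof(TagVal) * (size_t)n);
  if (out == NULL) return NULL;
  for (int i = 0; i < n; i++) {
    out[i] = old[i];
    if (old[i].cid == cid) {
      snprintf(out[i].val, sizeof(out[i].val), "%s", newVal);
    }
  }
  return out;
}

int main(void) {
  TagVal  tags[2] = {{1, "red"}, {2, "small"}};
  TagVal *updated = tagSet(tags, 2, 2, "large");
  if (updated != NULL) {
    printf("cid=2 -> %s\n", updated[1].val); /* prints: cid=2 -> large */
    free(updated);
  }
  return 0;
}
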
+bool tsSmlDataFormat = + true; // true means that the name and order of cols in each line are the same(only for influx protocol) // query int32_t tsQueryPolicy = 1; @@ -292,6 +293,7 @@ int32_t taosAddClientLogCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "jniDebugFlag", jniDebugFlag, 0, 255, 1) != 0) return -1; if (cfgAddInt32(pCfg, "simDebugFlag", 143, 0, 255, 1) != 0) return -1; if (cfgAddInt32(pCfg, "debugFlag", 0, 0, 255, 1) != 0) return -1; + if (cfgAddInt32(pCfg, "idxDebugFlag", idxDebugFlag, 0, 255, 1) != 0) return -1; return 0; } @@ -307,6 +309,7 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "fsDebugFlag", fsDebugFlag, 0, 255, 0) != 0) return -1; if (cfgAddInt32(pCfg, "fnDebugFlag", fnDebugFlag, 0, 255, 0) != 0) return -1; if (cfgAddInt32(pCfg, "smaDebugFlag", smaDebugFlag, 0, 255, 0) != 0) return -1; + if (cfgAddInt32(pCfg, "idxDebugFlag", idxDebugFlag, 0, 255, 0) != 0) return -1; return 0; } @@ -479,6 +482,7 @@ static void taosSetClientLogCfg(SConfig *pCfg) { rpcDebugFlag = cfgGetItem(pCfg, "rpcDebugFlag")->i32; tmrDebugFlag = cfgGetItem(pCfg, "tmrDebugFlag")->i32; jniDebugFlag = cfgGetItem(pCfg, "jniDebugFlag")->i32; + idxDebugFlag = cfgGetItem(pCfg, "idxDebugFlag")->i32; } static void taosSetServerLogCfg(SConfig *pCfg) { @@ -493,6 +497,7 @@ static void taosSetServerLogCfg(SConfig *pCfg) { fsDebugFlag = cfgGetItem(pCfg, "fsDebugFlag")->i32; fnDebugFlag = cfgGetItem(pCfg, "fnDebugFlag")->i32; smaDebugFlag = cfgGetItem(pCfg, "smaDebugFlag")->i32; + idxDebugFlag = cfgGetItem(pCfg, "idxDebugFlag")->i32; } static int32_t taosSetClientCfg(SConfig *pCfg) { diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index cc333ae5c8cb730390120e4d5d25a36318bfaf31..7615f7b070ca350a27cc7d6a05520386fcaa6759 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -28,7 +28,7 @@ #undef TD_MSG_SEG_CODE_ #include "tmsgdef.h" -int32_t tInitSubmitMsgIter(const SSubmitReq *pMsg, SSubmitMsgIter *pIter) { +int32_t tInitSubmitMsgIter(SSubmitReq *pMsg, SSubmitMsgIter *pIter) { if (pMsg == NULL) { terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; return -1; @@ -102,7 +102,7 @@ STSRow *tGetSubmitBlkNext(SSubmitBlkIter *pIter) { } } -int32_t tPrintFixedSchemaSubmitReq(const SSubmitReq *pReq, STSchema *pTschema) { +int32_t tPrintFixedSchemaSubmitReq(SSubmitReq *pReq, STSchema *pTschema) { SSubmitMsgIter msgIter = {0}; if (tInitSubmitMsgIter(pReq, &msgIter) < 0) return -1; while (true) { @@ -600,7 +600,8 @@ int32_t tSerializeSMAlterStbReq(void *buf, int32_t bufLen, SMAlterStbReq *pReq) if (tStartEncode(&encoder) < 0) return -1; if (tEncodeCStr(&encoder, pReq->name) < 0) return -1; if (tEncodeI8(&encoder, pReq->alterType) < 0) return -1; - if (tEncodeI32(&encoder, pReq->verInBlock) < 0) return -1; + if (tEncodeI32(&encoder, pReq->tagVer) < 0) return -1; + if (tEncodeI32(&encoder, pReq->colVer) < 0) return -1; if (tEncodeI32(&encoder, pReq->numOfFields) < 0) return -1; for (int32_t i = 0; i < pReq->numOfFields; ++i) { SField *pField = taosArrayGet(pReq->pFields, i); @@ -627,7 +628,8 @@ int32_t tDeserializeSMAlterStbReq(void *buf, int32_t bufLen, SMAlterStbReq *pReq if (tStartDecode(&decoder) < 0) return -1; if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1; if (tDecodeI8(&decoder, &pReq->alterType) < 0) return -1; - if (tDecodeI32(&decoder, &pReq->verInBlock) < 0) return -1; + if (tDecodeI32(&decoder, &pReq->tagVer) < 0) return -1; + if (tDecodeI32(&decoder, &pReq->colVer) < 0) return -1; if (tDecodeI32(&decoder, &pReq->numOfFields) < 0) return -1; 
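
In the tmsg.c hunk above, tSerializeSMAlterStbReq and tDeserializeSMAlterStbReq are changed in lockstep: verInBlock is replaced by tagVer plus colVer on both sides, because field order on the wire is the whole contract between the two. A toy fixed-order codec illustrating that discipline, assuming nothing about the real SEncoder/SDecoder API:

/* Toy codec: the decoder must read exactly the fields the encoder wrote,
 * in exactly the same order. Not the real TDengine wire format. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { int32_t tagVer, colVer, numOfFields; } AlterReq;

static size_t encodeReq(uint8_t *buf, const AlterReq *r) {
  size_t pos = 0;
  memcpy(buf + pos, &r->tagVer, 4);      pos += 4; /* field order is the contract: */
  memcpy(buf + pos, &r->colVer, 4);      pos += 4; /* adding a field here without   */
  memcpy(buf + pos, &r->numOfFields, 4); pos += 4; /* updating decodeReq breaks it  */
  return pos;
}

static size_t decodeReq(const uint8_t *buf, AlterReq *r) {
  size_t pos = 0;
  memcpy(&r->tagVer, buf + pos, 4);      pos += 4;
  memcpy(&r->colVer, buf + pos, 4);      pos += 4;
  memcpy(&r->numOfFields, buf + pos, 4); pos += 4;
  return pos;
}

int main(void) {
  uint8_t  buf[64];
  AlterReq in = {.tagVer = 3, .colVer = 7, .numOfFields = 2}, out = {0};
  size_t   n = encodeReq(buf, &in);
  assert(decodeReq(buf, &out) == n);
  assert(out.tagVer == 3 && out.colVer == 7);
  printf("tagVer=%d colVer=%d\n", out.tagVer, out.colVer);
  return 0;
}
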
pReq->pFields = taosArrayInit(pReq->numOfFields, sizeof(SField)); if (pReq->pFields == NULL) { @@ -663,22 +665,24 @@ void tFreeSMAltertbReq(SMAlterStbReq *pReq) { taosArrayDestroy(pReq->pFields); pReq->pFields = NULL; } -int32_t tSerializeSMEpSet(void *buf, int32_t bufLen, SMEpSet *pReq) { + +int32_t tSerializeSEpSet(void *buf, int32_t bufLen, const SEpSet *pEpset) { SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); if (tStartEncode(&encoder) < 0) return -1; - if (tEncodeSEpSet(&encoder, &pReq->epSet) < 0) return -1; + if (tEncodeSEpSet(&encoder, pEpset) < 0) return -1; tEndEncode(&encoder); int32_t tlen = encoder.pos; tEncoderClear(&encoder); return tlen; } -int32_t tDeserializeSMEpSet(void *buf, int32_t bufLen, SMEpSet *pReq) { + +int32_t tDeserializeSEpSet(void *buf, int32_t bufLen, SEpSet *pEpset) { SDecoder decoder = {0}; tDecoderInit(&decoder, buf, bufLen); if (tStartDecode(&decoder) < 0) return -1; - if (tDecodeSEpSet(&decoder, &pReq->epSet) < 0) return -1; + if (tDecodeSEpSet(&decoder, pEpset) < 0) return -1; tEndDecode(&decoder); tDecoderClear(&decoder); @@ -891,6 +895,9 @@ int32_t tSerializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) { if (tEncodeI64(&encoder, pload->pointsWritten) < 0) return -1; } + // mnode loads + if (tEncodeI32(&encoder, pReq->mload.syncState) < 0) return -1; + tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -946,6 +953,8 @@ int32_t tDeserializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) { } } + if (tDecodeI32(&decoder, &pReq->mload.syncState) < 0) return -1; + tEndDecode(&decoder); tDecoderClear(&decoder); return 0; @@ -1675,6 +1684,7 @@ int32_t tSerializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq) { if (tEncodeI8(&encoder, pReq->replications) < 0) return -1; if (tEncodeI8(&encoder, pReq->strict) < 0) return -1; if (tEncodeI8(&encoder, pReq->cacheLastRow) < 0) return -1; + if (tEncodeI8(&encoder, pReq->schemaless) < 0) return -1; if (tEncodeI8(&encoder, pReq->ignoreExist) < 0) return -1; if (tEncodeI32(&encoder, pReq->numOfRetensions) < 0) return -1; for (int32_t i = 0; i < pReq->numOfRetensions; ++i) { @@ -1715,6 +1725,7 @@ int32_t tDeserializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq) if (tDecodeI8(&decoder, &pReq->replications) < 0) return -1; if (tDecodeI8(&decoder, &pReq->strict) < 0) return -1; if (tDecodeI8(&decoder, &pReq->cacheLastRow) < 0) return -1; + if (tDecodeI8(&decoder, &pReq->schemaless) < 0) return -1; if (tDecodeI8(&decoder, &pReq->ignoreExist) < 0) return -1; if (tDecodeI32(&decoder, &pReq->numOfRetensions) < 0) return -1; pReq->pRetensions = taosArrayInit(pReq->numOfRetensions, sizeof(SRetention)); @@ -2908,6 +2919,8 @@ int32_t tSerializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *pR if (tEncodeI8(&encoder, pRetension->freqUnit) < 0) return -1; if (tEncodeI8(&encoder, pRetension->keepUnit) < 0) return -1; } + + if (tEncodeI8(&encoder, pReq->isTsma) < 0) return -1; tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -2970,6 +2983,8 @@ int32_t tDeserializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq * } } + if (tDecodeI8(&decoder, &pReq->isTsma) < 0) return -1; + tEndDecode(&decoder); tDecoderClear(&decoder); return 0; @@ -3181,7 +3196,6 @@ int32_t tSerializeSDCreateMnodeReq(void *buf, int32_t bufLen, SDCreateMnodeReq * tEncoderInit(&encoder, buf, bufLen); if (tStartEncode(&encoder) < 0) return -1; - if (tEncodeI32(&encoder, pReq->dnodeId) < 0) return -1; if (tEncodeI8(&encoder, pReq->replica) < 0) return -1; for (int32_t i = 0; i < 
TSDB_MAX_REPLICA; ++i) { SReplica *pReplica = &pReq->replicas[i]; @@ -3199,7 +3213,6 @@ int32_t tDeserializeSDCreateMnodeReq(void *buf, int32_t bufLen, SDCreateMnodeReq tDecoderInit(&decoder, buf, bufLen); if (tStartDecode(&decoder) < 0) return -1; - if (tDecodeI32(&decoder, &pReq->dnodeId) < 0) return -1; if (tDecodeI8(&decoder, &pReq->replica) < 0) return -1; for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) { SReplica *pReplica = &pReq->replicas[i]; @@ -3318,9 +3331,11 @@ int32_t tSerializeSExplainRsp(void *buf, int32_t bufLen, SExplainRsp *pRsp) { if (tEncodeI32(&encoder, pRsp->numOfPlans) < 0) return -1; for (int32_t i = 0; i < pRsp->numOfPlans; ++i) { SExplainExecInfo *info = &pRsp->subplanInfo[i]; - if (tEncodeU64(&encoder, info->startupCost) < 0) return -1; - if (tEncodeU64(&encoder, info->totalCost) < 0) return -1; + if (tEncodeDouble(&encoder, info->startupCost) < 0) return -1; + if (tEncodeDouble(&encoder, info->totalCost) < 0) return -1; if (tEncodeU64(&encoder, info->numOfRows) < 0) return -1; + if (tEncodeU32(&encoder, info->verboseLen) < 0) return -1; + if (tEncodeBinary(&encoder, info->verboseInfo, info->verboseLen) < 0) return -1; } tEndEncode(&encoder); @@ -3341,9 +3356,12 @@ int32_t tDeserializeSExplainRsp(void *buf, int32_t bufLen, SExplainRsp *pRsp) { if (pRsp->subplanInfo == NULL) return -1; } for (int32_t i = 0; i < pRsp->numOfPlans; ++i) { - if (tDecodeU64(&decoder, &pRsp->subplanInfo[i].startupCost) < 0) return -1; - if (tDecodeU64(&decoder, &pRsp->subplanInfo[i].totalCost) < 0) return -1; + if (tDecodeDouble(&decoder, &pRsp->subplanInfo[i].startupCost) < 0) return -1; + if (tDecodeDouble(&decoder, &pRsp->subplanInfo[i].totalCost) < 0) return -1; if (tDecodeU64(&decoder, &pRsp->subplanInfo[i].numOfRows) < 0) return -1; + if (tDecodeU32(&decoder, &pRsp->subplanInfo[i].verboseLen) < 0) return -1; + if (tDecodeBinary(&decoder, (uint8_t **)&pRsp->subplanInfo[i].verboseInfo, &pRsp->subplanInfo[i].verboseLen) < 0) + return -1; } tEndDecode(&decoder); @@ -3491,31 +3509,6 @@ int32_t tDeserializeSSchedulerHbRsp(void *buf, int32_t bufLen, SSchedulerHbRsp * void tFreeSSchedulerHbRsp(SSchedulerHbRsp *pRsp) { taosArrayDestroy(pRsp->taskStatus); } -int32_t tSerializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp) { - SEncoder encoder = {0}; - tEncoderInit(&encoder, buf, bufLen); - - if (tStartEncode(&encoder) < 0) return -1; - if (tEncodeI32(&encoder, pRsp->code) < 0) return -1; - tEndEncode(&encoder); - - int32_t tlen = encoder.pos; - tEncoderClear(&encoder); - return tlen; -} - -int32_t tDeserializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp) { - SDecoder decoder = {0}; - tDecoderInit(&decoder, buf, bufLen); - - if (tStartDecode(&decoder) < 0) return -1; - if (tDecodeI32(&decoder, &pRsp->code) < 0) return -1; - tEndDecode(&decoder); - - tDecoderClear(&decoder); - return 0; -} - int32_t tSerializeSVCreateTbBatchRsp(void *buf, int32_t bufLen, SVCreateTbBatchRsp *pRsp) { // SEncoder encoder = {0}; // tEncoderInit(&encoder, buf, bufLen); @@ -3692,6 +3685,7 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS if (tStartEncode(&encoder) < 0) return -1; if (tEncodeCStr(&encoder, pReq->name) < 0) return -1; + if (tEncodeCStr(&encoder, pReq->sourceDB) < 0) return -1; if (tEncodeCStr(&encoder, pReq->targetStbFullName) < 0) return -1; if (tEncodeI8(&encoder, pReq->igExists) < 0) return -1; if (tEncodeI32(&encoder, sqlLen) < 0) return -1; @@ -3717,6 +3711,7 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, 
SCMCreateStrea if (tStartDecode(&decoder) < 0) return -1; if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1; + if (tDecodeCStrTo(&decoder, pReq->sourceDB) < 0) return -1; if (tDecodeCStrTo(&decoder, pReq->targetStbFullName) < 0) return -1; if (tDecodeI8(&decoder, &pReq->igExists) < 0) return -1; if (tDecodeI32(&decoder, &sqlLen) < 0) return -1; @@ -3789,7 +3784,7 @@ int tEncodeSVCreateStbReq(SEncoder *pCoder, const SVCreateStbReq *pReq) { if (tEncodeCStr(pCoder, pReq->name) < 0) return -1; if (tEncodeI64(pCoder, pReq->suid) < 0) return -1; if (tEncodeI8(pCoder, pReq->rollup) < 0) return -1; - if (tEncodeSSchemaWrapper(pCoder, &pReq->schema) < 0) return -1; + if (tEncodeSSchemaWrapper(pCoder, &pReq->schemaRow) < 0) return -1; if (tEncodeSSchemaWrapper(pCoder, &pReq->schemaTag) < 0) return -1; if (pReq->rollup) { if (tEncodeSRSmaParam(pCoder, &pReq->pRSmaParam) < 0) return -1; @@ -3805,7 +3800,7 @@ int tDecodeSVCreateStbReq(SDecoder *pCoder, SVCreateStbReq *pReq) { if (tDecodeCStr(pCoder, &pReq->name) < 0) return -1; if (tDecodeI64(pCoder, &pReq->suid) < 0) return -1; if (tDecodeI8(pCoder, &pReq->rollup) < 0) return -1; - if (tDecodeSSchemaWrapper(pCoder, &pReq->schema) < 0) return -1; + if (tDecodeSSchemaWrapper(pCoder, &pReq->schemaRow) < 0) return -1; if (tDecodeSSchemaWrapper(pCoder, &pReq->schemaTag) < 0) return -1; if (pReq->rollup) { if (tDecodeSRSmaParam(pCoder, &pReq->pRSmaParam) < 0) return -1; @@ -3817,7 +3812,7 @@ int tDecodeSVCreateStbReq(SDecoder *pCoder, SVCreateStbReq *pReq) { STSchema *tdGetSTSChemaFromSSChema(SSchema **pSchema, int32_t nCols) { STSchemaBuilder schemaBuilder = {0}; - if (tdInitTSchemaBuilder(&schemaBuilder, 0) < 0) { + if (tdInitTSchemaBuilder(&schemaBuilder, 1) < 0) { return NULL; } @@ -3843,10 +3838,9 @@ int tEncodeSVCreateTbReq(SEncoder *pCoder, const SVCreateTbReq *pReq) { if (tStartEncode(pCoder) < 0) return -1; if (tEncodeI32v(pCoder, pReq->flags) < 0) return -1; + if (tEncodeCStr(pCoder, pReq->name) < 0) return -1; if (tEncodeI64(pCoder, pReq->uid) < 0) return -1; if (tEncodeI64(pCoder, pReq->ctime) < 0) return -1; - - if (tEncodeCStr(pCoder, pReq->name) < 0) return -1; if (tEncodeI32(pCoder, pReq->ttl) < 0) return -1; if (tEncodeI8(pCoder, pReq->type) < 0) return -1; @@ -3854,7 +3848,7 @@ int tEncodeSVCreateTbReq(SEncoder *pCoder, const SVCreateTbReq *pReq) { if (tEncodeI64(pCoder, pReq->ctb.suid) < 0) return -1; if (tEncodeBinary(pCoder, pReq->ctb.pTag, kvRowLen(pReq->ctb.pTag)) < 0) return -1; } else if (pReq->type == TSDB_NORMAL_TABLE) { - if (tEncodeSSchemaWrapper(pCoder, &pReq->ntb.schema) < 0) return -1; + if (tEncodeSSchemaWrapper(pCoder, &pReq->ntb.schemaRow) < 0) return -1; } else { ASSERT(0); } @@ -3869,10 +3863,9 @@ int tDecodeSVCreateTbReq(SDecoder *pCoder, SVCreateTbReq *pReq) { if (tStartDecode(pCoder) < 0) return -1; if (tDecodeI32v(pCoder, &pReq->flags) < 0) return -1; + if (tDecodeCStr(pCoder, &pReq->name) < 0) return -1; if (tDecodeI64(pCoder, &pReq->uid) < 0) return -1; if (tDecodeI64(pCoder, &pReq->ctime) < 0) return -1; - - if (tDecodeCStr(pCoder, &pReq->name) < 0) return -1; if (tDecodeI32(pCoder, &pReq->ttl) < 0) return -1; if (tDecodeI8(pCoder, &pReq->type) < 0) return -1; @@ -3880,7 +3873,7 @@ int tDecodeSVCreateTbReq(SDecoder *pCoder, SVCreateTbReq *pReq) { if (tDecodeI64(pCoder, &pReq->ctb.suid) < 0) return -1; if (tDecodeBinary(pCoder, &pReq->ctb.pTag, &len) < 0) return -1; } else if (pReq->type == TSDB_NORMAL_TABLE) { - if (tDecodeSSchemaWrapper(pCoder, &pReq->ntb.schema) < 0) return -1; + if 
(tDecodeSSchemaWrapper(pCoder, &pReq->ntb.schemaRow) < 0) return -1; } else { ASSERT(0); } diff --git a/source/common/src/tname.c b/source/common/src/tname.c index 104dee261c9f64c7c8859228dcb0595f4b4df2c0..fd055135799a5e508ec535b43d46e9246c8d644e 100644 --- a/source/common/src/tname.c +++ b/source/common/src/tname.c @@ -127,7 +127,7 @@ int32_t tNameExtractFullName(const SName* name, char* dst) { size_t tnameLen = strlen(name->tname); if (tnameLen > 0) { - assert(name->type == TSDB_TABLE_NAME_T); + /*assert(name->type == TSDB_TABLE_NAME_T);*/ dst[len] = TS_PATH_DELIMITER[0]; memcpy(dst + len + 1, name->tname, tnameLen); @@ -314,9 +314,9 @@ void buildChildTableName(RandTableName* rName) { for (int j = 0; j < taosArrayGetSize(rName->tags); ++j) { SSmlKv* tagKv = taosArrayGetP(rName->tags, j); taosStringBuilderAppendStringLen(&sb, tagKv->key, tagKv->keyLen); - if(IS_VAR_DATA_TYPE(tagKv->type)){ + if (IS_VAR_DATA_TYPE(tagKv->type)) { taosStringBuilderAppendStringLen(&sb, tagKv->value, tagKv->length); - }else{ + } else { taosStringBuilderAppendStringLen(&sb, (char*)(&(tagKv->value)), tagKv->length); } } diff --git a/source/common/src/trow.c b/source/common/src/trow.c index 22bdd960eac7b2e6bcfb6ded211effbdc692f64d..c8a28d7f28f747b65fae3802bc392ac6163e5e1e 100644 --- a/source/common/src/trow.c +++ b/source/common/src/trow.c @@ -341,18 +341,19 @@ int32_t tdSetBitmapValTypeN(void *pBitmap, int16_t nEle, TDRowValT valType, int8 bool tdIsBitmapBlkNorm(const void *pBitmap, int32_t numOfBits, int8_t bitmapMode) { int32_t nBytes = (bitmapMode == 0 ? numOfBits / TD_VTYPE_PARTS : numOfBits / TD_VTYPE_PARTS_I); uint8_t vTypeByte = tdVTypeByte[bitmapMode][TD_VTYPE_NORM]; + uint8_t *qBitmap = (uint8_t*)pBitmap; for (int i = 0; i < nBytes; ++i) { - if (*((uint8_t *)pBitmap) != vTypeByte) { + if (*qBitmap != vTypeByte) { return false; } - pBitmap = POINTER_SHIFT(pBitmap, i); + qBitmap = (uint8_t *)POINTER_SHIFT(pBitmap, i); } int32_t nLeft = numOfBits - nBytes * (bitmapMode == 0 ? 
TD_VTYPE_BITS : TD_VTYPE_BITS_I); for (int j = 0; j < nLeft; ++j) { uint8_t vType; - tdGetBitmapValType(pBitmap, j, &vType, bitmapMode); + tdGetBitmapValType(qBitmap, j, &vType, bitmapMode); if (vType != TD_VTYPE_NORM) { return false; } @@ -604,6 +605,10 @@ static int32_t tdAppendKvRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols * @param pCols */ int32_t tdAppendSTSRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge) { +#ifdef TD_DEBUG_PRINT_TSDB_LOAD_DCOLS + printf("%s:%d ts: %" PRIi64 " sver:%d maxCols:%" PRIi16 " nCols:%" PRIi16 ", nRows:%d\n", __func__, __LINE__, + TD_ROW_KEY(pRow), TD_ROW_SVER(pRow), pCols->maxCols, pCols->numOfCols, pCols->numOfRows); +#endif if (TD_IS_TP_ROW(pRow)) { return tdAppendTpRowToDataCol(pRow, pSchema, pCols, isMerge); } else if (TD_IS_KV_ROW(pRow)) { @@ -923,7 +928,7 @@ void tdSRowPrint(STSRow *row, STSchema *pSchema, const char *tag) { STSRowIter iter = {0}; tdSTSRowIterInit(&iter, pSchema); tdSTSRowIterReset(&iter, row); - printf("%s >>>", tag); + printf("%s >>>type:%d,sver:%d ", tag, (int32_t)TD_ROW_TYPE(row), (int32_t)TD_ROW_SVER(row)); for (int i = 0; i < pSchema->numOfCols; ++i) { STColumn *stCol = pSchema->columns + i; SCellVal sVal = {255, NULL}; @@ -1190,9 +1195,9 @@ bool tdGetTpRowDataOfCol(STSRowIter *pIter, col_type_t colType, int32_t offset, } static FORCE_INLINE int32_t compareKvRowColId(const void *key1, const void *key2) { - if (*(int16_t *)key1 > ((SColIdx *)key2)->colId) { + if (*(col_id_t *)key1 > ((SKvRowIdx *)key2)->colId) { return 1; - } else if (*(int16_t *)key1 < ((SColIdx *)key2)->colId) { + } else if (*(col_id_t *)key1 < ((SKvRowIdx *)key2)->colId) { return -1; } else { return 0; diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c index 69ba964187fe44f33b4df1ab8b5c7706a8569eec..10ba58af298c59306badc2e299e588e3ec46874f 100644 --- a/source/common/src/ttime.c +++ b/source/common/src/ttime.c @@ -184,6 +184,16 @@ int32_t parseTimezone(char* str, int64_t* tzOffset) { i++; + int32_t j = i; + while (str[j]) { + if ((str[j] >= '0' && str[j] <= '9') || str[j] == ':') { + ++j; + continue; + } + + return -1; + } + char* sep = strchr(&str[i], ':'); if (sep != NULL) { int32_t len = (int32_t)(sep - &str[i]); @@ -511,21 +521,21 @@ int32_t convertStringToTimestamp(int16_t type, char *inputData, int64_t timePrec if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_VARBINARY) { newColData = taosMemoryCalloc(1, charLen + 1); memcpy(newColData, varDataVal(inputData), charLen); - bool ret = taosParseTime(newColData, timeVal, charLen, (int32_t)timePrec, tsDaylight); + int32_t ret = taosParseTime(newColData, timeVal, charLen, (int32_t)timePrec, tsDaylight); if (ret != TSDB_CODE_SUCCESS) { taosMemoryFree(newColData); - return ret; + return TSDB_CODE_INVALID_TIMESTAMP; } taosMemoryFree(newColData); } else if (type == TSDB_DATA_TYPE_NCHAR) { - newColData = taosMemoryCalloc(1, charLen / TSDB_NCHAR_SIZE + 1); + newColData = taosMemoryCalloc(1, charLen + TSDB_NCHAR_SIZE); int len = taosUcs4ToMbs((TdUcs4 *)varDataVal(inputData), charLen, newColData); if (len < 0){ taosMemoryFree(newColData); return TSDB_CODE_FAILED; } newColData[len] = 0; - bool ret = taosParseTime(newColData, timeVal, len + 1, (int32_t)timePrec, tsDaylight); + int32_t ret = taosParseTime(newColData, timeVal, len + 1, (int32_t)timePrec, tsDaylight); if (ret != TSDB_CODE_SUCCESS) { taosMemoryFree(newColData); return ret; @@ -773,7 +783,7 @@ int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precisio // 2020-07-03 
17:48:42 // and the parameter can also be a variable. const char* fmtts(int64_t ts) { - static char buf[96]; + static char buf[96] = {0}; size_t pos = 0; struct tm tm; diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index f7337f482f23945b99893dee242d9af9a10631a6..2533f268e5cd5355c1dba75fb384e977c386d1fa 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -75,8 +75,9 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) { (*pMgmt->getVnodeLoadsFp)(&vinfo); req.pVloads = vinfo.pVloads; - SMonMloadInfo minfo = {0}; + SMonMloadInfo minfo = {0}; (*pMgmt->getMnodeLoadsFp)(&minfo); + req.mload = minfo.load; int32_t contLen = tSerializeSStatusReq(NULL, 0, &req); void *pHead = rpcMallocCont(contLen); @@ -91,6 +92,13 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) { SEpSet epSet = {0}; dmGetMnodeEpSet(pMgmt->pData, &epSet); rpcSendRecv(pMgmt->msgCb.clientRpc, &epSet, &rpcMsg, &rpcRsp); + if (rpcRsp.code != 0) { + dError("failed to send status msg since %s, numOfEps:%d inUse:%d", tstrerror(rpcRsp.code), epSet.numOfEps, + epSet.inUse); + for (int32_t i = 0; i < epSet.numOfEps; ++i) { + dDebug("index:%d, mnode ep:%s:%u", i, epSet.eps[i].fqdn, epSet.eps[i].port); + } + } dmProcessStatusRsp(pMgmt, &rpcRsp); } diff --git a/source/dnode/mgmt/mgmt_mnode/inc/mmInt.h b/source/dnode/mgmt/mgmt_mnode/inc/mmInt.h index 75e83d65471fdebfba4fdbfa3083a2dc02f7fd22..bd034fe7d6c21dcf31e0ca4e9e83d7a23fa28fb8 100644 --- a/source/dnode/mgmt/mgmt_mnode/inc/mmInt.h +++ b/source/dnode/mgmt/mgmt_mnode/inc/mmInt.h @@ -24,19 +24,21 @@ extern "C" { #endif typedef struct SMnodeMgmt { - SDnodeData *pData; - SMnode *pMnode; - SMsgCb msgCb; - const char *path; - const char *name; - SSingleWorker queryWorker; - SSingleWorker readWorker; - SSingleWorker writeWorker; - SSingleWorker syncWorker; - SSingleWorker monitorWorker; - SReplica replicas[TSDB_MAX_REPLICA]; - int8_t replica; - int8_t selfIndex; + SDnodeData *pData; + SMnode *pMnode; + SMsgCb msgCb; + const char *path; + const char *name; + SSingleWorker queryWorker; + SSingleWorker readWorker; + SSingleWorker writeWorker; + SSingleWorker syncWorker; + SSingleWorker monitorWorker; + SReplica replicas[TSDB_MAX_REPLICA]; + int8_t replica; + bool stopped; + int32_t refCount; + TdThreadRwlock lock; } SMnodeMgmt; // mmFile.c @@ -44,7 +46,8 @@ int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed); int32_t mmWriteFile(SMnodeMgmt *pMgmt, SDCreateMnodeReq *pMsg, bool deployed); // mmInt.c -int32_t mmAlter(SMnodeMgmt *pMgmt, SDAlterMnodeReq *pMsg); +int32_t mmAcquire(SMnodeMgmt *pMgmt); +void mmRelease(SMnodeMgmt *pMgmt); // mmHandle.c SArray *mmGetMsgHandles(); diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c index 2aa108777078de3e9b2b8a2323c0d28572a15db2..478d6abd52cdba9c0a2f99acd3001e281ade6b8d 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c @@ -53,43 +53,45 @@ int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed) { *pDeployed = deployed->valueint; cJSON *mnodes = cJSON_GetObjectItem(root, "mnodes"); - if (!mnodes || mnodes->type != cJSON_Array) { - dError("failed to read %s since nodes not found", file); - goto _OVER; - } - - pMgmt->replica = cJSON_GetArraySize(mnodes); - if (pMgmt->replica <= 0 || pMgmt->replica > TSDB_MAX_REPLICA) { - dError("failed to read %s since mnodes size %d invalid", file, pMgmt->replica); - goto _OVER; - } - - for (int32_t i = 0; i < pMgmt->replica; ++i) { - 
cJSON *node = cJSON_GetArrayItem(mnodes, i); - if (node == NULL) break; - - SReplica *pReplica = &pMgmt->replicas[i]; - - cJSON *id = cJSON_GetObjectItem(node, "id"); - if (!id || id->type != cJSON_Number) { - dError("failed to read %s since id not found", file); + if (mnodes != NULL) { + if (!mnodes || mnodes->type != cJSON_Array) { + dError("failed to read %s since nodes not found", file); goto _OVER; } - pReplica->id = id->valueint; - cJSON *fqdn = cJSON_GetObjectItem(node, "fqdn"); - if (!fqdn || fqdn->type != cJSON_String || fqdn->valuestring == NULL) { - dError("failed to read %s since fqdn not found", file); + pMgmt->replica = cJSON_GetArraySize(mnodes); + if (pMgmt->replica <= 0 || pMgmt->replica > TSDB_MAX_REPLICA) { + dError("failed to read %s since mnodes size %d invalid", file, pMgmt->replica); goto _OVER; } - tstrncpy(pReplica->fqdn, fqdn->valuestring, TSDB_FQDN_LEN); - cJSON *port = cJSON_GetObjectItem(node, "port"); - if (!port || port->type != cJSON_Number) { - dError("failed to read %s since port not found", file); - goto _OVER; + for (int32_t i = 0; i < pMgmt->replica; ++i) { + cJSON *node = cJSON_GetArrayItem(mnodes, i); + if (node == NULL) break; + + SReplica *pReplica = &pMgmt->replicas[i]; + + cJSON *id = cJSON_GetObjectItem(node, "id"); + if (!id || id->type != cJSON_Number) { + dError("failed to read %s since id not found", file); + goto _OVER; + } + pReplica->id = id->valueint; + + cJSON *fqdn = cJSON_GetObjectItem(node, "fqdn"); + if (!fqdn || fqdn->type != cJSON_String || fqdn->valuestring == NULL) { + dError("failed to read %s since fqdn not found", file); + goto _OVER; + } + tstrncpy(pReplica->fqdn, fqdn->valuestring, TSDB_FQDN_LEN); + + cJSON *port = cJSON_GetObjectItem(node, "port"); + if (!port || port->type != cJSON_Number) { + dError("failed to read %s since port not found", file); + goto _OVER; + } + pReplica->port = port->valueint; } - pReplica->port = port->valueint; } code = 0; @@ -122,21 +124,23 @@ int32_t mmWriteFile(SMnodeMgmt *pMgmt, SDCreateMnodeReq *pMsg, bool deployed) { char *content = taosMemoryCalloc(1, maxLen + 1); len += snprintf(content + len, maxLen - len, "{\n"); - len += snprintf(content + len, maxLen - len, " \"mnodes\": [{\n"); int8_t replica = (pMsg != NULL ? 
pMsg->replica : pMgmt->replica); - for (int32_t i = 0; i < replica; ++i) { - SReplica *pReplica = &pMgmt->replicas[i]; - if (pMsg != NULL) { - pReplica = &pMsg->replicas[i]; - } - len += snprintf(content + len, maxLen - len, " \"id\": %d,\n", pReplica->id); - len += snprintf(content + len, maxLen - len, " \"fqdn\": \"%s\",\n", pReplica->fqdn); - len += snprintf(content + len, maxLen - len, " \"port\": %u\n", pReplica->port); - if (i < replica - 1) { - len += snprintf(content + len, maxLen - len, " },{\n"); - } else { - len += snprintf(content + len, maxLen - len, " }],\n"); + if (replica > 0) { + len += snprintf(content + len, maxLen - len, " \"mnodes\": [{\n"); + for (int32_t i = 0; i < replica; ++i) { + SReplica *pReplica = &pMgmt->replicas[i]; + if (pMsg != NULL) { + pReplica = &pMsg->replicas[i]; + } + len += snprintf(content + len, maxLen - len, " \"id\": %d,\n", pReplica->id); + len += snprintf(content + len, maxLen - len, " \"fqdn\": \"%s\",\n", pReplica->fqdn); + len += snprintf(content + len, maxLen - len, " \"port\": %u\n", pReplica->port); + if (i < replica - 1) { + len += snprintf(content + len, maxLen - len, " },{\n"); + } else { + len += snprintf(content + len, maxLen - len, " }],\n"); + } } } diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c index 2ce42d7a5ff30c4cdb6d1da2a00c933cc2e882ac..f6350ba27954349a89849f66a9d15be7ffb6266d 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c @@ -79,7 +79,7 @@ int32_t mmProcessCreateReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) { return -1; } - if (createReq.replica <= 1 || (createReq.dnodeId != pInput->pData->dnodeId && pInput->pData->dnodeId != 0)) { + if (createReq.replica != 1) { terrno = TSDB_CODE_INVALID_OPTION; dError("failed to create mnode since %s", terrstr()); return -1; @@ -124,22 +124,6 @@ int32_t mmProcessDropReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) { return 0; } -int32_t mmProcessAlterReq(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) { - SDAlterMnodeReq alterReq = {0}; - if (tDeserializeSDCreateMnodeReq(pMsg->pCont, pMsg->contLen, &alterReq) != 0) { - terrno = TSDB_CODE_INVALID_MSG; - return -1; - } - - if (pMgmt->pData->dnodeId != 0 && alterReq.dnodeId != pMgmt->pData->dnodeId) { - terrno = TSDB_CODE_INVALID_OPTION; - dError("failed to alter mnode since %s, input:%d cur:%d", terrstr(), alterReq.dnodeId, pMgmt->pData->dnodeId); - return -1; - } else { - return mmAlter(pMgmt, &alterReq); - } -} - SArray *mmGetMsgHandles() { int32_t code = -1; SArray *pArray = taosArrayInit(64, sizeof(SMgmtHandle)); @@ -237,6 +221,16 @@ SArray *mmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_VNODE_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_COMPACT_VNODE_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_TIMEOUT, mmPutNodeMsgToSyncQueue, 1) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_PING, mmPutNodeMsgToSyncQueue, 1) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_PING_REPLY, mmPutNodeMsgToSyncQueue, 1) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_CLIENT_REQUEST, mmPutNodeMsgToSyncQueue, 1) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_CLIENT_REQUEST_REPLY, mmPutNodeMsgToSyncQueue, 1) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_REQUEST_VOTE, mmPutNodeMsgToSyncQueue, 1) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, 
TDMT_VND_SYNC_REQUEST_VOTE_REPLY, mmPutNodeMsgToSyncQueue, 1) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_APPEND_ENTRIES, mmPutNodeMsgToSyncQueue, 1) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_APPEND_ENTRIES_REPLY, mmPutNodeMsgToSyncQueue, 1) == NULL) goto _OVER; + code = 0; _OVER: diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmInt.c b/source/dnode/mgmt/mgmt_mnode/src/mmInt.c index 4f7fd4a1c0c925093b3773e06b9dfba1718ce945..1b973f3045d5dd4e2f6e5fcc4e25413068af6af5 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmInt.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmInt.c @@ -39,77 +39,45 @@ static int32_t mmRequire(const SMgmtInputOpt *pInput, bool *required) { } static void mmBuildOptionForDeploy(SMnodeMgmt *pMgmt, const SMgmtInputOpt *pInput, SMnodeOpt *pOption) { + pOption->standby = false; + pOption->deploy = true; pOption->msgCb = pMgmt->msgCb; + pOption->dnodeId = pMgmt->pData->dnodeId; + pOption->replica = 1; pOption->selfIndex = 0; + SReplica *pReplica = &pOption->replicas[0]; pReplica->id = 1; pReplica->port = tsServerPort; tstrncpy(pReplica->fqdn, tsLocalFqdn, TSDB_FQDN_LEN); - pOption->deploy = true; - - pMgmt->selfIndex = pOption->selfIndex; - pMgmt->replica = pOption->replica; - memcpy(&pMgmt->replicas, pOption->replicas, sizeof(SReplica) * TSDB_MAX_REPLICA); } static void mmBuildOptionForOpen(SMnodeMgmt *pMgmt, SMnodeOpt *pOption) { - pOption->msgCb = pMgmt->msgCb; - pOption->selfIndex = pMgmt->selfIndex; - pOption->replica = pMgmt->replica; - memcpy(&pOption->replicas, pMgmt->replicas, sizeof(SReplica) * TSDB_MAX_REPLICA); pOption->deploy = false; -} - -static int32_t mmBuildOptionFromReq(SMnodeMgmt *pMgmt, SMnodeOpt *pOption, SDCreateMnodeReq *pCreate) { + pOption->standby = false; pOption->msgCb = pMgmt->msgCb; - pOption->replica = pCreate->replica; - pOption->selfIndex = -1; - for (int32_t i = 0; i < pCreate->replica; ++i) { - SReplica *pReplica = &pOption->replicas[i]; - pReplica->id = pCreate->replicas[i].id; - pReplica->port = pCreate->replicas[i].port; - memcpy(pReplica->fqdn, pCreate->replicas[i].fqdn, TSDB_FQDN_LEN); - if (pReplica->id == pMgmt->pData->dnodeId) { - pOption->selfIndex = i; + pOption->dnodeId = pMgmt->pData->dnodeId; + + if (pMgmt->replica > 0) { + pOption->standby = true; + pOption->replica = 1; + pOption->selfIndex = 0; + SReplica *pReplica = &pOption->replicas[0]; + for (int32_t i = 0; i < pMgmt->replica; ++i) { + if (pMgmt->replicas[i].id != pMgmt->pData->dnodeId) continue; + pReplica->id = pMgmt->replicas[i].id; + pReplica->port = pMgmt->replicas[i].port; + memcpy(pReplica->fqdn, pMgmt->replicas[i].fqdn, TSDB_FQDN_LEN); } } - - if (pOption->selfIndex == -1) { - dError("failed to build mnode options since %s", terrstr()); - return -1; - } - pOption->deploy = true; - - pMgmt->selfIndex = pOption->selfIndex; - pMgmt->replica = pOption->replica; - memcpy(&pMgmt->replicas, pOption->replicas, sizeof(SReplica) * TSDB_MAX_REPLICA); - return 0; -} - -int32_t mmAlter(SMnodeMgmt *pMgmt, SDAlterMnodeReq *pMsg) { - SMnodeOpt option = {0}; - if (mmBuildOptionFromReq(pMgmt, &option, pMsg) != 0) { - return -1; - } - - if (mndAlter(pMgmt->pMnode, &option) != 0) { - return -1; - } - - bool deployed = true; - if (mmWriteFile(pMgmt, pMsg, deployed) != 0) { - dError("failed to write mnode file since %s", terrstr()); - return -1; - } - - return 0; } static void mmClose(SMnodeMgmt *pMgmt) { if (pMgmt->pMnode != NULL) { mmStopWorker(pMgmt); mndClose(pMgmt->pMnode); + taosThreadRwlockDestroy(&pMgmt->lock); pMgmt->pMnode = NULL; } @@ 
-122,6 +90,11 @@ static int32_t mmOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { return -1; } + if (syncInit() != 0) { + dError("failed to init sync since %s", terrstr()); + return -1; + } + SMnodeMgmt *pMgmt = taosMemoryCalloc(1, sizeof(SMnodeMgmt)); if (pMgmt == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -137,6 +110,7 @@ static int32_t mmOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { pMgmt->msgCb.queueFps[WRITE_QUEUE] = (PutToQueueFp)mmPutRpcMsgToWriteQueue; pMgmt->msgCb.queueFps[SYNC_QUEUE] = (PutToQueueFp)mmPutRpcMsgToSyncQueue; pMgmt->msgCb.mgmt = pMgmt; + taosThreadRwlockInit(&pMgmt->lock, NULL); bool deployed = false; if (mmReadFile(pMgmt, &deployed) != 0) { @@ -170,7 +144,8 @@ static int32_t mmOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { } tmsgReportStartup("mnode-worker", "initialized"); - if (!deployed) { + if (!deployed || pMgmt->replica > 0) { + pMgmt->replica = 0; deployed = true; if (mmWriteFile(pMgmt, NULL, deployed) != 0) { dError("failed to write mnode file since %s", terrstr()); @@ -206,3 +181,22 @@ SMgmtFunc mmGetMgmtFunc() { return mgmtFunc; } + +int32_t mmAcquire(SMnodeMgmt *pMgmt) { + int32_t code = 0; + + taosThreadRwlockRdlock(&pMgmt->lock); + if (pMgmt->stopped) { + code = -1; + } else { + atomic_add_fetch_32(&pMgmt->refCount, 1); + } + taosThreadRwlockUnlock(&pMgmt->lock); + return code; +} + +void mmRelease(SMnodeMgmt *pMgmt) { + taosThreadRwlockRdlock(&pMgmt->lock); + atomic_sub_fetch_32(&pMgmt->refCount, 1); + taosThreadRwlockUnlock(&pMgmt->lock); +} \ No newline at end of file diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c b/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c index 43ee7cd80a5aa1880092a4ca3abe4add2d42249c..1de9875d063933fe1f35bb5b5770c1aabc6b8fc3 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c @@ -32,9 +32,6 @@ static void mmProcessQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) { dTrace("msg:%p, get from mnode queue", pMsg); switch (pMsg->msgType) { - case TDMT_DND_ALTER_MNODE: - code = mmProcessAlterReq(pMgmt, pMsg); - break; case TDMT_MON_MM_INFO: code = mmProcessGetMonitorInfoReq(pMgmt, pMsg); break; @@ -43,7 +40,7 @@ static void mmProcessQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) { break; default: pMsg->info.node = pMgmt->pMnode; - code = mndProcessMsg(pMsg); + code = mndProcessRpcMsg(pMsg); } if (IsReq(pMsg) && pMsg->info.handle != NULL && code != TSDB_CODE_ACTION_IN_PROGRESS) { @@ -56,6 +53,23 @@ static void mmProcessQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) { taosFreeQitem(pMsg); } +static void mmProcessSyncQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) { + SMnodeMgmt *pMgmt = pInfo->ahandle; + dTrace("msg:%p, get from mnode-sync queue", pMsg); + + pMsg->info.node = pMgmt->pMnode; + + SMsgHead *pHead = pMsg->pCont; + pHead->contLen = ntohl(pHead->contLen); + pHead->vgId = ntohl(pHead->vgId); + + int32_t code = mndProcessSyncMsg(pMsg); + + dTrace("msg:%p, is freed, code:0x%x", pMsg, code); + rpcFreeCont(pMsg->pCont); + taosFreeQitem(pMsg); +} + static int32_t mmPutNodeMsgToWorker(SSingleWorker *pWorker, SRpcMsg *pMsg) { dTrace("msg:%p, put into worker %s, type:%s", pMsg, pWorker->name, TMSG_INFO(pMsg->msgType)); taosWriteQitem(pWorker->queue, pMsg); @@ -105,7 +119,17 @@ int32_t mmPutRpcMsgToReadQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) { } int32_t mmPutRpcMsgToSyncQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) { - return mmPutRpcMsgToWorker(&pMgmt->syncWorker, pMsg); + int32_t code = -1; + if (mmAcquire(pMgmt) == 0) { + code = mmPutRpcMsgToWorker(&pMgmt->syncWorker, pMsg); + 
mmRelease(pMgmt); + } + + if (code != 0) { + rpcFreeCont(pMsg->pCont); + pMsg->pCont = NULL; + } + return code; } int32_t mmStartWorker(SMnodeMgmt *pMgmt) { @@ -149,7 +173,7 @@ int32_t mmStartWorker(SMnodeMgmt *pMgmt) { .min = 1, .max = 1, .name = "mnode-sync", - .fp = (FItem)mmProcessQueue, + .fp = (FItem)mmProcessSyncQueue, .param = pMgmt, }; if (tSingleWorkerInit(&pMgmt->syncWorker, &sCfg) != 0) { @@ -174,6 +198,11 @@ int32_t mmStartWorker(SMnodeMgmt *pMgmt) { } void mmStopWorker(SMnodeMgmt *pMgmt) { + taosThreadRwlockWrlock(&pMgmt->lock); + pMgmt->stopped = 1; + taosThreadRwlockUnlock(&pMgmt->lock); + while (pMgmt->refCount > 0) taosMsleep(10); + tSingleWorkerCleanup(&pMgmt->monitorWorker); tSingleWorkerCleanup(&pMgmt->queryWorker); tSingleWorkerCleanup(&pMgmt->readWorker); diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c b/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c index c4b1ab63e46d62720131953bbddc928fc351d31c..65794b7b8136f0d6314880399ac08a195eecd22a 100644 --- a/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c +++ b/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c @@ -16,7 +16,11 @@ #define _DEFAULT_SOURCE #include "qmInt.h" -void qmGetMonitorInfo(SQnodeMgmt *pMgmt, SMonQmInfo *qmInfo) {} +void qmGetMonitorInfo(SQnodeMgmt *pMgmt, SMonQmInfo *qmInfo) { + SQnodeLoad qload = {0}; + qndGetLoad(pMgmt->pQnode, &qload); + +} int32_t qmProcessGetMonitorInfoReq(SQnodeMgmt *pMgmt, SRpcMsg *pMsg) { SMonQmInfo qmInfo = {0}; @@ -101,8 +105,6 @@ SArray *qmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_VND_FETCH_RSP, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_HEARTBEAT, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_RES_READY, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_TASKS_STATUS, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_CANCEL_TASK, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TASK, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER; diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c b/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c index 35c94b7fbe786434cfb59191c8899949099d0325..e7fc261b67a8a6416cdbafae07552a5c9576bc22 100644 --- a/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c +++ b/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c @@ -36,7 +36,7 @@ static void qmProcessQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) { code = qmProcessGetMonitorInfoReq(pMgmt, pMsg); break; default: - code = qndProcessQueryMsg(pMgmt->pQnode, pMsg); + code = qndProcessQueryMsg(pMgmt->pQnode, pInfo->timestamp, pMsg); break; } diff --git a/source/dnode/mgmt/mgmt_snode/src/smHandle.c b/source/dnode/mgmt/mgmt_snode/src/smHandle.c index bf1bb145b7548f1e50958e4cf718ebdc627bdfcf..a3aab439debfbd536312a2b5cbc104b4cf0fa2e2 100644 --- a/source/dnode/mgmt/mgmt_snode/src/smHandle.c +++ b/source/dnode/mgmt/mgmt_snode/src/smHandle.c @@ -96,7 +96,7 @@ SArray *smGetMsgHandles() { // Requests handled by SNODE if (dmSetMgmtHandle(pArray, TDMT_SND_TASK_DEPLOY, smPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_SND_TASK_EXEC, smPutNodeMsgToExecQueue, 0) == NULL) goto _OVER; + /*if (dmSetMgmtHandle(pArray, TDMT_SND_TASK_EXEC, smPutNodeMsgToExecQueue, 0) == NULL) goto _OVER;*/ code = 0; _OVER: diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index f28209f9828062f8ed27f194914b4ac11848735a..8374db129ffacd5ba7776662cb0bc393ac77667e 
100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -138,7 +138,7 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) { pCfg->dbId = pCreate->dbUid; pCfg->szPage = pCreate->pageSize * 1024; pCfg->szCache = pCreate->pages; - pCfg->szBuf = pCreate->buffer * 1024 * 1024; + pCfg->szBuf = (uint64_t)pCreate->buffer * 1024 * 1024; pCfg->isWeak = true; pCfg->tsdbCfg.compression = pCreate->compression; pCfg->tsdbCfg.precision = pCreate->precision; @@ -183,7 +183,7 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return -1; } - dDebug("vgId:%d, create vnode req is received", createReq.vgId); + dDebug("vgId:%d, create vnode req is received, tsma:%d", createReq.vgId, createReq.isTsma); SVnodeCfg vnodeCfg = {0}; vmGenerateVnodeCfg(&createReq, &vnodeCfg); @@ -292,8 +292,6 @@ SArray *vmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_CONNECT, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_DISCONNECT, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; // if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_SET_CUR, vmPutNodeMsgToWriteQueue, 0)== NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_RES_READY, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_TASKS_STATUS, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_CANCEL_TASK, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TASK, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_STB, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; @@ -310,9 +308,6 @@ SArray *vmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_VND_CONSUME, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_DEPLOY, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_HEARTBEAT, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_PIPE_EXEC, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_MERGE_EXEC, vmPutNodeMsgToMergeQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_WRITE_EXEC, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TRIGGER, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_RUN, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_DISPATCH, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER; diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index e7bfbdae58bf0556c8e873511476b3b17bd75a73..6183794bdd9c87da091a64c5333ad42f70dd824e 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -113,6 +113,8 @@ static void vmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO SRpcMsg *pMsg = *(SRpcMsg **)taosArrayGet(pArray, i); SRpcMsg rsp = {.info = pMsg->info}; + vnodePreprocessReq(pVnode->pImpl, pMsg); + int32_t ret = syncPropose(vnodeGetSyncHandle(pVnode->pImpl), pMsg, false); if (ret == TAOS_SYNC_PROPOSE_NOT_LEADER) { dTrace("msg:%p, is redirect since not leader, vgId:%d ", pMsg, pVnode->vgId); diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index 
6fbfae8b416efc68a0be9b101f1308aeba723752..987fc5441653a09c27d889b03af30150622f96a3 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -62,8 +62,10 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) { dmProcessNetTestReq(pDnode, pRpc); return; } else if (pRpc->msgType == TDMT_MND_SYSTABLE_RETRIEVE_RSP || pRpc->msgType == TDMT_VND_FETCH_RSP) { - qWorkerProcessFetchRsp(NULL, NULL, pRpc); + qWorkerProcessFetchRsp(NULL, NULL, pRpc, 0); return; + } else if (pRpc->msgType == TDMT_MND_STATUS_RSP && pEpSet != NULL) { + dmSetMnodeEpSet(&pDnode->data, pEpSet); } else { } @@ -204,29 +206,28 @@ static inline void dmSendRsp(SRpcMsg *pMsg) { } static void dmBuildMnodeRedirectRsp(SDnode *pDnode, SRpcMsg *pMsg) { - SMEpSet msg = {0}; - dmGetMnodeEpSetForRedirect(&pDnode->data, pMsg, &msg.epSet); + SEpSet epSet = {0}; + dmGetMnodeEpSetForRedirect(&pDnode->data, pMsg, &epSet); - int32_t contLen = tSerializeSMEpSet(NULL, 0, &msg); + int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet); pMsg->pCont = rpcMallocCont(contLen); if (pMsg->pCont == NULL) { pMsg->code = TSDB_CODE_OUT_OF_MEMORY; } else { - tSerializeSMEpSet(pMsg->pCont, contLen, &msg); + tSerializeSEpSet(pMsg->pCont, contLen, &epSet); pMsg->contLen = contLen; } } static inline void dmSendRedirectRsp(SRpcMsg *pMsg, const SEpSet *pNewEpSet) { SRpcMsg rsp = {.code = TSDB_CODE_RPC_REDIRECT, .info = pMsg->info}; - SMEpSet msg = {.epSet = *pNewEpSet}; - int32_t contLen = tSerializeSMEpSet(NULL, 0, &msg); + int32_t contLen = tSerializeSEpSet(NULL, 0, pNewEpSet); rsp.pCont = rpcMallocCont(contLen); if (rsp.pCont == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; } else { - tSerializeSMEpSet(rsp.pCont, contLen, &msg); + tSerializeSEpSet(rsp.pCont, contLen, pNewEpSet); rsp.contLen = contLen; } dmSendRsp(&rsp); diff --git a/source/dnode/mgmt/node_util/inc/dmUtil.h b/source/dnode/mgmt/node_util/inc/dmUtil.h index 4946669678cd0fd17a22b935aa9e2613e58d73db..0d921c2e8b8d810891d1718648f1aead826f9116 100644 --- a/source/dnode/mgmt/node_util/inc/dmUtil.h +++ b/source/dnode/mgmt/node_util/inc/dmUtil.h @@ -90,8 +90,8 @@ typedef enum { typedef int32_t (*ProcessCreateNodeFp)(EDndNodeType ntype, SRpcMsg *pMsg); typedef int32_t (*ProcessDropNodeFp)(EDndNodeType ntype, SRpcMsg *pMsg); typedef void (*SendMonitorReportFp)(); -typedef void (*GetVnodeLoadsFp)(); -typedef void (*GetMnodeLoadsFp)(); +typedef void (*GetVnodeLoadsFp)(SMonVloadInfo *pInfo); +typedef void (*GetMnodeLoadsFp)(SMonMloadInfo *pInfo); typedef struct { int32_t dnodeId; diff --git a/source/dnode/mgmt/node_util/src/dmEps.c b/source/dnode/mgmt/node_util/src/dmEps.c index e0af20e41bfef194d90d30316c16042522e7f87d..332dd9a58a6e14f0e0eca104142b9d1258f51cb6 100644 --- a/source/dnode/mgmt/node_util/src/dmEps.c +++ b/source/dnode/mgmt/node_util/src/dmEps.c @@ -326,6 +326,7 @@ void dmGetMnodeEpSetForRedirect(SDnodeData *pData, SRpcMsg *pMsg, SEpSet *pEpSet } void dmSetMnodeEpSet(SDnodeData *pData, SEpSet *pEpSet) { + if (memcmp(pEpSet, &pData->mnodeEps, sizeof(SEpSet)) == 0) return; taosThreadRwlockWrlock(&pData->lock); pData->mnodeEps = *pEpSet; taosThreadRwlockUnlock(&pData->lock); diff --git a/source/dnode/mgmt/test/CMakeLists.txt b/source/dnode/mgmt/test/CMakeLists.txt index e1656ceb34d222fb13ef524b087349756d46d6ff..6b1919bf1862b5eeca9047de4731dae306ca275a 100644 --- a/source/dnode/mgmt/test/CMakeLists.txt +++ b/source/dnode/mgmt/test/CMakeLists.txt @@ -3,7 +3,7 @@ if(${BUILD_TEST}) add_subdirectory(qnode) add_subdirectory(bnode) 
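
The dmEps.c hunk above adds an early return to dmSetMnodeEpSet: when the incoming SEpSet is bit-identical to the cached one, the write lock is never taken. A minimal sketch of that compare-before-lock idea, using hypothetical Ep/EpCache types and assuming zero-initialized structs so padding bytes compare equal:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

typedef struct { char fqdn[32]; unsigned short port; } Ep;
typedef struct {
  Ep               eps[3];
  int              numOfEps;
  pthread_rwlock_t lock;
} EpCache;

/* Update the cached endpoint set, but skip the write lock entirely when the
 * incoming set (n <= 3 here) matches what we already hold. Like the patch,
 * the unlocked compare is a benign race: a concurrent writer at worst sends
 * us down the slow path or lets us skip a redundant, identical write. */
static void epCacheSet(EpCache *c, const Ep *eps, int n) {
  Ep incoming[3];
  memset(incoming, 0, sizeof(incoming)); /* zero so memcmp over padding is stable */
  memcpy(incoming, eps, sizeof(Ep) * (size_t)n);

  if (n == c->numOfEps && memcmp(incoming, c->eps, sizeof(incoming)) == 0) return;

  pthread_rwlock_wrlock(&c->lock);
  memcpy(c->eps, incoming, sizeof(incoming));
  c->numOfEps = n;
  pthread_rwlock_unlock(&c->lock);
}

int main(void) {
  EpCache cache;
  memset(&cache, 0, sizeof(cache));
  pthread_rwlock_init(&cache.lock, NULL);

  Ep ep = {"node1.example.com", 6030};
  epCacheSet(&cache, &ep, 1); /* takes the lock: set changed */
  epCacheSet(&cache, &ep, 1); /* fast path: identical set, no lock taken */
  printf("cached %d ep(s), first %s:%u\n", cache.numOfEps, cache.eps[0].fqdn,
         (unsigned)cache.eps[0].port);
  pthread_rwlock_destroy(&cache.lock);
  return 0;
}
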
add_subdirectory(snode) - add_subdirectory(mnode) + #add_subdirectory(mnode) add_subdirectory(vnode) add_subdirectory(sut) endif(${BUILD_TEST}) diff --git a/source/dnode/mgmt/test/bnode/dbnode.cpp b/source/dnode/mgmt/test/bnode/dbnode.cpp index 0568b30245d77e2a8dd7d2d227484477790b3270..c2a9873e5b0e4b2ddca8007e76d10e4e69fa3567 100644 --- a/source/dnode/mgmt/test/bnode/dbnode.cpp +++ b/source/dnode/mgmt/test/bnode/dbnode.cpp @@ -14,7 +14,7 @@ class DndTestBnode : public ::testing::Test { protected: static void SetUpTestSuite() { - test.Init("/tmp/dbnodeTest", 9112); + test.Init(TD_TMP_DIR_PATH "dbnodeTest", 9112); taosMsleep(1100); } static void TearDownTestSuite() { test.Cleanup(); } diff --git a/source/dnode/mgmt/test/mnode/CMakeLists.txt b/source/dnode/mgmt/test/mnode/CMakeLists.txt index e83f5dbbec9e84feac3738838e8c96e6dd3f3a3b..788cf53976185f5737c6571232e6c25add05e189 100644 --- a/source/dnode/mgmt/test/mnode/CMakeLists.txt +++ b/source/dnode/mgmt/test/mnode/CMakeLists.txt @@ -4,7 +4,7 @@ target_link_libraries( dmnodeTest sut ) -add_test( - NAME dmnodeTest - COMMAND dmnodeTest -) +#add_test( +# NAME dmnodeTest +# COMMAND dmnodeTest +#) diff --git a/source/dnode/mgmt/test/mnode/dmnode.cpp b/source/dnode/mgmt/test/mnode/dmnode.cpp index 98b50e96cfc84168ebecdb7d76ba23884c001d45..8c945b50ac48b4b1e290875c58a98f168971bc37 100644 --- a/source/dnode/mgmt/test/mnode/dmnode.cpp +++ b/source/dnode/mgmt/test/mnode/dmnode.cpp @@ -13,7 +13,7 @@ class DndTestMnode : public ::testing::Test { protected: - static void SetUpTestSuite() { test.Init("/tmp/dmnodeTest", 9114); } + static void SetUpTestSuite() { test.Init(TD_TMP_DIR_PATH "dmnodeTest", 9114); } static void TearDownTestSuite() { test.Cleanup(); } static Testbase test; diff --git a/source/dnode/mgmt/test/qnode/dqnode.cpp b/source/dnode/mgmt/test/qnode/dqnode.cpp index 2430419bef4e45e625ce60060849baa63eee3934..ef51be47a68d8772507dcee7ae8fae5b29d82bf6 100644 --- a/source/dnode/mgmt/test/qnode/dqnode.cpp +++ b/source/dnode/mgmt/test/qnode/dqnode.cpp @@ -13,7 +13,7 @@ class DndTestQnode : public ::testing::Test { protected: - static void SetUpTestSuite() { test.Init("/tmp/dqnodeTest", 9111); } + static void SetUpTestSuite() { test.Init(TD_TMP_DIR_PATH "dqnodeTest", 9111); } static void TearDownTestSuite() { test.Cleanup(); } static Testbase test; diff --git a/source/dnode/mgmt/test/snode/dsnode.cpp b/source/dnode/mgmt/test/snode/dsnode.cpp index 9ade616f191be10af24474fbf0f5581f30a1c061..9ae0fbdc542bb145c3b6e8d45fe355f1b187450d 100644 --- a/source/dnode/mgmt/test/snode/dsnode.cpp +++ b/source/dnode/mgmt/test/snode/dsnode.cpp @@ -13,7 +13,7 @@ class DndTestSnode : public ::testing::Test { protected: - static void SetUpTestSuite() { test.Init("/tmp/dsnodeTest", 9113); } + static void SetUpTestSuite() { test.Init(TD_TMP_DIR_PATH "dsnodeTest", 9113); } static void TearDownTestSuite() { test.Cleanup(); } static Testbase test; diff --git a/source/dnode/mgmt/test/sut/src/sut.cpp b/source/dnode/mgmt/test/sut/src/sut.cpp index 7bfa0417afbd957f81603371a3bb8f476868689e..6ef94481ea8edd66317f4d2a7b01bc3234ccd4d9 100644 --- a/source/dnode/mgmt/test/sut/src/sut.cpp +++ b/source/dnode/mgmt/test/sut/src/sut.cpp @@ -48,7 +48,7 @@ void Testbase::Init(const char* path, int16_t port) { strcpy(tsDataDir, path); taosRemoveDir(path); taosMkDir(path); - InitLog("/tmp/td"); + InitLog(TD_TMP_DIR_PATH "td"); server.Start(); client.Init("root", "taosdata"); diff --git a/source/dnode/mgmt/test/vnode/vnode.cpp b/source/dnode/mgmt/test/vnode/vnode.cpp index 
bddf9518195d162c3934f8bb33fc34953f2f7830..8aba4f81b59ee35ff61616f0ada5056c8d76074a 100644 --- a/source/dnode/mgmt/test/vnode/vnode.cpp +++ b/source/dnode/mgmt/test/vnode/vnode.cpp @@ -13,7 +13,7 @@ class DndTestVnode : public ::testing::Test { protected: - static void SetUpTestSuite() { test.Init("/tmp/dvnodeTest", 9115); } + static void SetUpTestSuite() { test.Init(TD_TMP_DIR_PATH "dvnodeTest", 9115); } static void TearDownTestSuite() { test.Cleanup(); } static Testbase test; diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index 81f4c5ed1ef87431b639d256acde0faa596692fe..4d5aab4590f06ed5cfb944a9b60029819651e65e 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -53,6 +53,11 @@ typedef enum { MND_AUTH_MAX } EAuthOp; +typedef enum { + TRN_STEP_LOG = 1, + TRN_STEP_ACTION = 2, +} ETrnStep; + typedef enum { TRN_STAGE_PREPARE = 0, TRN_STAGE_REDO_LOG = 1, @@ -67,30 +72,34 @@ typedef enum { typedef enum { TRN_TYPE_BASIC_SCOPE = 1000, - TRN_TYPE_CREATE_USER = 1001, - TRN_TYPE_ALTER_USER = 1002, - TRN_TYPE_DROP_USER = 1003, - TRN_TYPE_CREATE_FUNC = 1004, - TRN_TYPE_DROP_FUNC = 1005, - - TRN_TYPE_CREATE_SNODE = 1006, - TRN_TYPE_DROP_SNODE = 1007, - TRN_TYPE_CREATE_QNODE = 1008, - TRN_TYPE_DROP_QNODE = 1009, - TRN_TYPE_CREATE_BNODE = 1010, - TRN_TYPE_DROP_BNODE = 1011, - TRN_TYPE_CREATE_MNODE = 1012, - TRN_TYPE_DROP_MNODE = 1013, - TRN_TYPE_CREATE_TOPIC = 1014, - TRN_TYPE_DROP_TOPIC = 1015, - TRN_TYPE_SUBSCRIBE = 1016, - TRN_TYPE_REBALANCE = 1017, - TRN_TYPE_COMMIT_OFFSET = 1018, - TRN_TYPE_CREATE_STREAM = 1019, - TRN_TYPE_DROP_STREAM = 1020, - TRN_TYPE_ALTER_STREAM = 1021, - TRN_TYPE_CONSUMER_LOST = 1022, - TRN_TYPE_CONSUMER_RECOVER = 1023, + TRN_TYPE_CREATE_ACCT = 1001, + TRN_TYPE_CREATE_CLUSTER = 1002, + TRN_TYPE_CREATE_USER = 1003, + TRN_TYPE_ALTER_USER = 1004, + TRN_TYPE_DROP_USER = 1005, + TRN_TYPE_CREATE_FUNC = 1006, + TRN_TYPE_DROP_FUNC = 1007, + + TRN_TYPE_CREATE_SNODE = 1010, + TRN_TYPE_DROP_SNODE = 1011, + TRN_TYPE_CREATE_QNODE = 1012, + TRN_TYPE_DROP_QNODE = 1013, + TRN_TYPE_CREATE_BNODE = 1014, + TRN_TYPE_DROP_BNODE = 1015, + TRN_TYPE_CREATE_MNODE = 1016, + TRN_TYPE_DROP_MNODE = 1017, + + TRN_TYPE_CREATE_TOPIC = 1020, + TRN_TYPE_DROP_TOPIC = 1021, + TRN_TYPE_SUBSCRIBE = 1022, + TRN_TYPE_REBALANCE = 1023, + TRN_TYPE_COMMIT_OFFSET = 1024, + TRN_TYPE_CREATE_STREAM = 1025, + TRN_TYPE_DROP_STREAM = 1026, + TRN_TYPE_ALTER_STREAM = 1027, + TRN_TYPE_CONSUMER_LOST = 1028, + TRN_TYPE_CONSUMER_RECOVER = 1029, + TRN_TYPE_DROP_CGROUP = 1030, TRN_TYPE_BASIC_SCOPE_END, TRN_TYPE_GLOBAL_SCOPE = 2000, @@ -120,6 +129,11 @@ typedef enum { TRN_POLICY_RETRY = 1, } ETrnPolicy; +typedef enum { + TRN_EXEC_PARALLEL = 0, + TRN_EXEC_ONE_BY_ONE = 1, +} ETrnExecType; + typedef enum { DND_REASON_ONLINE = 0, DND_REASON_STATUS_MSG_TIMEOUT, @@ -148,6 +162,7 @@ typedef struct { ETrnStage stage; ETrnPolicy policy; ETrnType type; + ETrnExecType parallel; int32_t code; int32_t failedTimes; SRpcHandleInfo rpcInfo; @@ -196,9 +211,8 @@ typedef struct { int32_t id; int64_t createdTime; int64_t updateTime; - ESyncState role; - int32_t roleTerm; - int64_t roleTime; + ESyncState state; + int64_t stateStartTime; SDnodeObj* pDnode; } SMnodeObj; @@ -328,6 +342,7 @@ typedef struct { int64_t compStorage; int64_t pointsWritten; int8_t compact; + int8_t isTsma; int8_t replica; SVnodeGid vnodeGid[TSDB_MAX_REPLICA]; } SVgObj; @@ -364,7 +379,6 @@ typedef struct { int64_t updateTime; int64_t uid; int64_t dbUid; - int32_t version; int32_t tagVer; int32_t
colVer; int32_t nextColId; @@ -587,7 +601,8 @@ typedef struct { int8_t status; int8_t createdBy; // STREAM_CREATED_BY__USER or SMA int32_t fixedSinkVgId; // 0 for shuffle - int64_t smaId; // 0 for unused + SVgObj fixedSinkVg; + int64_t smaId; // 0 for unused int8_t trigger; int32_t triggerParam; int64_t waterMark; diff --git a/source/dnode/mnode/impl/inc/mndInt.h b/source/dnode/mnode/impl/inc/mndInt.h index 5258fa9e023a49e3fdf4ea41b2785d3ed93a27a8..6661347e4206b28d6977b622bc4cd8777b34abb7 100644 --- a/source/dnode/mnode/impl/inc/mndInt.h +++ b/source/dnode/mnode/impl/inc/mndInt.h @@ -19,6 +19,7 @@ #include "mndDef.h" #include "sdb.h" +#include "syncTools.h" #include "tcache.h" #include "tdatablock.h" #include "tglobal.h" @@ -31,12 +32,14 @@ extern "C" { #endif +// clang-format off #define mFatal(...) { if (mDebugFlag & DEBUG_FATAL) { taosPrintLog("MND FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} #define mError(...) { if (mDebugFlag & DEBUG_ERROR) { taosPrintLog("MND ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} #define mWarn(...) { if (mDebugFlag & DEBUG_WARN) { taosPrintLog("MND WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} #define mInfo(...) { if (mDebugFlag & DEBUG_INFO) { taosPrintLog("MND ", DEBUG_INFO, 255, __VA_ARGS__); }} #define mDebug(...) { if (mDebugFlag & DEBUG_DEBUG) { taosPrintLog("MND ", DEBUG_DEBUG, mDebugFlag, __VA_ARGS__); }} #define mTrace(...) { if (mDebugFlag & DEBUG_TRACE) { taosPrintLog("MND ", DEBUG_TRACE, mDebugFlag, __VA_ARGS__); }} +// clang-format on #define SYSTABLE_SCH_TABLE_NAME_LEN ((TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE) #define SYSTABLE_SCH_DB_NAME_LEN ((TSDB_DB_NAME_LEN - 1) + VARSTR_HEADER_SIZE) @@ -72,11 +75,12 @@ typedef struct { } STelemMgmt; typedef struct { - int32_t errCode; - sem_t syncSem; - SWal *pWal; - SSyncNode *pSyncNode; - ESyncState state; + SWal *pWal; + sem_t syncSem; + int64_t sync; + bool standby; + int32_t errCode; + int32_t transId; } SSyncMgmt; typedef struct { @@ -85,33 +89,45 @@ typedef struct { } SGrantInfo; typedef struct SMnode { - int32_t selfId; - int64_t clusterId; - TdThread thread; - bool stopped; - int8_t replica; - int8_t selfIndex; - SReplica replicas[TSDB_MAX_REPLICA]; - char *path; - int64_t checkTime; - SSdb *pSdb; - SMgmtWrapper *pWrapper; - SArray *pSteps; - SQHandle *pQuery; - SShowMgmt showMgmt; - SProfileMgmt profileMgmt; - STelemMgmt telemMgmt; - SSyncMgmt syncMgmt; - SHashObj *infosMeta; - SHashObj *perfsMeta; - SGrantInfo grant; - MndMsgFp msgFp[TDMT_MAX]; - SMsgCb msgCb; + int32_t selfDnodeId; + int64_t clusterId; + TdThread thread; + TdThreadRwlock lock; + int32_t rpcRef; + int32_t syncRef; + bool stopped; + bool restored; + bool deploy; + int8_t replica; + int8_t selfIndex; + SReplica replicas[TSDB_MAX_REPLICA]; + char *path; + int64_t checkTime; + SSdb *pSdb; + SArray *pSteps; + SQHandle *pQuery; + SHashObj *infosMeta; + SHashObj *perfsMeta; + SShowMgmt showMgmt; + SProfileMgmt profileMgmt; + STelemMgmt telemMgmt; + SSyncMgmt syncMgmt; + SGrantInfo grant; + MndMsgFp msgFp[TDMT_MAX]; + SMsgCb msgCb; } SMnode; void mndSetMsgHandle(SMnode *pMnode, tmsg_t msgType, MndMsgFp fp); int64_t mndGenerateUid(char *name, int32_t len); +int32_t mndAcquireRpcRef(SMnode *pMnode); +void mndReleaseRpcRef(SMnode *pMnode); +void mndSetRestore(SMnode *pMnode, bool restored); +void mndSetStop(SMnode *pMnode); +bool mndGetStop(SMnode *pMnode); +int32_t mndAcquireSyncRef(SMnode *pMnode); +void mndReleaseSyncRef(SMnode *pMnode); + #ifdef __cplusplus } #endif diff --git a/source/dnode/mnode/impl/inc/mndMnode.h 
b/source/dnode/mnode/impl/inc/mndMnode.h index a5cdfa1061034c25f2162ffe1812ea3ee235bf36..fd62b3ce75a8691c95a9ecf8ec70daae272145c0 100644 --- a/source/dnode/mnode/impl/inc/mndMnode.h +++ b/source/dnode/mnode/impl/inc/mndMnode.h @@ -28,7 +28,6 @@ SMnodeObj *mndAcquireMnode(SMnode *pMnode, int32_t mnodeId); void mndReleaseMnode(SMnode *pMnode, SMnodeObj *pObj); bool mndIsMnode(SMnode *pMnode, int32_t dnodeId); void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet); -void mndUpdateMnodeRole(SMnode *pMnode); #ifdef __cplusplus } diff --git a/source/dnode/mnode/impl/inc/mndOffset.h b/source/dnode/mnode/impl/inc/mndOffset.h index 900181858bd724873ea948d450e830cc83643463..f7569b964875bbffe90c8fc5525fda8f68b688b8 100644 --- a/source/dnode/mnode/impl/inc/mndOffset.h +++ b/source/dnode/mnode/impl/inc/mndOffset.h @@ -39,6 +39,7 @@ static FORCE_INLINE int32_t mndMakePartitionKey(char *key, const char *cgroup, c int32_t mndDropOffsetByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb); int32_t mndDropOffsetByTopic(SMnode *pMnode, STrans *pTrans, const char *topic); +int32_t mndDropOffsetBySubKey(SMnode *pMnode, STrans *pTrans, const char *subKey); bool mndOffsetFromTopic(SMqOffsetObj *pOffset, const char *topic); diff --git a/source/dnode/mnode/impl/inc/mndScheduler.h b/source/dnode/mnode/impl/inc/mndScheduler.h index 33af040915688fd83c4a82af3c89047be5d20dae..9f4e377dd17dffc94ab04366e2c1ba61e170b92f 100644 --- a/source/dnode/mnode/impl/inc/mndScheduler.h +++ b/source/dnode/mnode/impl/inc/mndScheduler.h @@ -29,7 +29,8 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream); -int32_t mndConvertRSmaTask(const char* ast, int8_t triggerType, int64_t watermark, char** pStr, int32_t* pLen); +int32_t mndConvertRSmaTask(const char* ast, int64_t uid, int8_t triggerType, int64_t watermark, char** pStr, + int32_t* pLen); #ifdef __cplusplus } diff --git a/source/dnode/mnode/impl/inc/mndSubscribe.h b/source/dnode/mnode/impl/inc/mndSubscribe.h index 50cede62ce424ae855f46ba0f359b5088058e4d1..d91c2bd4c3f69063420f3a775f6183e3eaa3824d 100644 --- a/source/dnode/mnode/impl/inc/mndSubscribe.h +++ b/source/dnode/mnode/impl/inc/mndSubscribe.h @@ -33,6 +33,7 @@ int32_t mndMakeSubscribeKey(char *key, const char *cgroup, const char *topicName int32_t mndDropSubByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb); int32_t mndDropSubByTopic(SMnode *pMnode, STrans *pTrans, const char *topic); +int32_t mndSetDropSubCommitLogs(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub); #ifdef __cplusplus } diff --git a/source/dnode/mnode/impl/inc/mndSync.h b/source/dnode/mnode/impl/inc/mndSync.h index fe557cdeac0874dc815b5fe83b795a4b01bbfcec..cb9d70d5ee48f542dbe58100328b7f2284ea2926 100644 --- a/source/dnode/mnode/impl/inc/mndSync.h +++ b/source/dnode/mnode/impl/inc/mndSync.h @@ -25,7 +25,9 @@ extern "C" { int32_t mndInitSync(SMnode *pMnode); void mndCleanupSync(SMnode *pMnode); bool mndIsMaster(SMnode *pMnode); -int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw); +int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw, int32_t transId); +void mndSyncStart(SMnode *pMnode); +void mndSyncStop(SMnode *pMnode); #ifdef __cplusplus } diff --git a/source/dnode/mnode/impl/inc/mndTopic.h b/source/dnode/mnode/impl/inc/mndTopic.h index d7e6f9c87bee034856e7d512cb1e38a900e1412e..c5c4800e0295fa48ee4bf9669200f7ce7a31eff8 100644 --- a/source/dnode/mnode/impl/inc/mndTopic.h +++ b/source/dnode/mnode/impl/inc/mndTopic.h @@ -35,7 +35,7 @@ int32_t 
mndDropTopicByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb); const char *mndTopicGetShowName(const char topic[TSDB_TOPIC_FNAME_LEN]); -int32_t mndSetTopicRedoLogs(SMnode *pMnode, STrans *pTrans, SMqTopicObj *pTopic); +int32_t mndSetTopicCommitLogs(SMnode *pMnode, STrans *pTrans, SMqTopicObj *pTopic); #ifdef __cplusplus } diff --git a/source/dnode/mnode/impl/inc/mndTrans.h b/source/dnode/mnode/impl/inc/mndTrans.h index 84e7a17192b7ba41028989d8bc58e88229731e10..ce302a88e3fb4735b99ce2037b2dcb378254a844 100644 --- a/source/dnode/mnode/impl/inc/mndTrans.h +++ b/source/dnode/mnode/impl/inc/mndTrans.h @@ -22,6 +22,13 @@ extern "C" { #endif +typedef enum { + TRANS_START_FUNC_TEST = 1, + TRANS_STOP_FUNC_TEST = 2, + TRANS_START_FUNC_MQ_REB = 3, + TRANS_STOP_FUNC_TEST_MQ_REB = 4, +} ETrnFunc; + typedef struct { SEpSet epSet; tmsg_t msgType; @@ -33,12 +40,17 @@ typedef struct { void *pCont; } STransAction; -typedef enum { - TEST_TRANS_START_FUNC = 1, - TEST_TRANS_STOP_FUNC = 2, - MQ_REB_TRANS_START_FUNC = 3, - MQ_REB_TRANS_STOP_FUNC = 4, -} ETrnFuncType; +typedef struct { + SSdbRaw *pRaw; +} STransLog; + +typedef struct { + ETrnStep stepType; + STransAction redoAction; + STransAction undoAction; + STransLog redoLog; + STransLog undoLog; +} STransStep; typedef void (*TransCbFp)(SMnode *pMnode, void *param, int32_t paramLen); @@ -55,8 +67,9 @@ int32_t mndTransAppendCommitlog(STrans *pTrans, SSdbRaw *pRaw); int32_t mndTransAppendRedoAction(STrans *pTrans, STransAction *pAction); int32_t mndTransAppendUndoAction(STrans *pTrans, STransAction *pAction); void mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen); -void mndTransSetCb(STrans *pTrans, ETrnFuncType startFunc, ETrnFuncType stopFunc, void *param, int32_t paramLen); +void mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void *param, int32_t paramLen); void mndTransSetDbInfo(STrans *pTrans, SDbObj *pDb); +void mndTransSetExecOneByOne(STrans *pTrans); int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans); void mndTransProcessRsp(SRpcMsg *pRsp); diff --git a/source/dnode/mnode/impl/inc/mndVgroup.h b/source/dnode/mnode/impl/inc/mndVgroup.h index 9bf7b6eb8937cee5078ddab38e04810e77734d05..c9099b6b050481b78030befbe93de59139df1b27 100644 --- a/source/dnode/mnode/impl/inc/mndVgroup.h +++ b/source/dnode/mnode/impl/inc/mndVgroup.h @@ -30,6 +30,7 @@ SSdbRaw *mndVgroupActionEncode(SVgObj *pVgroup); SEpSet mndGetVgroupEpset(SMnode *pMnode, const SVgObj *pVgroup); int32_t mndGetVnodesNum(SMnode *pMnode, int32_t dnodeId); +int32_t mndAllocSmaVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup); int32_t mndAllocVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj **ppVgroups); SArray *mndBuildDnodesArray(SMnode *pMnode); int32_t mndAddVnodeToVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray); diff --git a/source/dnode/mnode/impl/src/mndAcct.c b/source/dnode/mnode/impl/src/mndAcct.c index 52b9ac62e67c652a914e560e9551c08606971af4..a4fde4b70670952dbf14554aa0fce15f77cb49f5 100644 --- a/source/dnode/mnode/impl/src/mndAcct.c +++ b/source/dnode/mnode/impl/src/mndAcct.c @@ -16,6 +16,7 @@ #define _DEFAULT_SOURCE #include "mndAcct.h" #include "mndShow.h" +#include "mndTrans.h" #define ACCT_VER_NUMBER 1 #define ACCT_RESERVE_SIZE 128 @@ -31,14 +32,16 @@ static int32_t mndProcessAlterAcctReq(SRpcMsg *pReq); static int32_t mndProcessDropAcctReq(SRpcMsg *pReq); int32_t mndInitAcct(SMnode *pMnode) { - SSdbTable table = {.sdbType = SDB_ACCT, - .keyType = SDB_KEY_BINARY, - .deployFp = mndCreateDefaultAcct, - .encodeFp = (SdbEncodeFp)mndAcctActionEncode, 
- .decodeFp = (SdbDecodeFp)mndAcctActionDecode, - .insertFp = (SdbInsertFp)mndAcctActionInsert, - .updateFp = (SdbUpdateFp)mndAcctActionUpdate, - .deleteFp = (SdbDeleteFp)mndAcctActionDelete}; + SSdbTable table = { + .sdbType = SDB_ACCT, + .keyType = SDB_KEY_BINARY, + .deployFp = mndCreateDefaultAcct, + .encodeFp = (SdbEncodeFp)mndAcctActionEncode, + .decodeFp = (SdbDecodeFp)mndAcctActionDecode, + .insertFp = (SdbInsertFp)mndAcctActionInsert, + .updateFp = (SdbUpdateFp)mndAcctActionUpdate, + .deleteFp = (SdbDeleteFp)mndAcctActionDelete, + }; mndSetMsgHandle(pMnode, TDMT_MND_CREATE_ACCT, mndProcessCreateAcctReq); mndSetMsgHandle(pMnode, TDMT_MND_ALTER_ACCT, mndProcessAlterAcctReq); @@ -56,25 +59,52 @@ static int32_t mndCreateDefaultAcct(SMnode *pMnode) { acctObj.updateTime = acctObj.createdTime; acctObj.acctId = 1; acctObj.status = 0; - acctObj.cfg = (SAcctCfg){.maxUsers = INT32_MAX, - .maxDbs = INT32_MAX, - .maxStbs = INT32_MAX, - .maxTbs = INT32_MAX, - .maxTimeSeries = INT32_MAX, - .maxStreams = INT32_MAX, - .maxFuncs = INT32_MAX, - .maxConsumers = INT32_MAX, - .maxConns = INT32_MAX, - .maxTopics = INT32_MAX, - .maxStorage = INT64_MAX, - .accessState = TSDB_VN_ALL_ACCCESS}; + acctObj.cfg = (SAcctCfg){ + .maxUsers = INT32_MAX, + .maxDbs = INT32_MAX, + .maxStbs = INT32_MAX, + .maxTbs = INT32_MAX, + .maxTimeSeries = INT32_MAX, + .maxStreams = INT32_MAX, + .maxFuncs = INT32_MAX, + .maxConsumers = INT32_MAX, + .maxConns = INT32_MAX, + .maxTopics = INT32_MAX, + .maxStorage = INT64_MAX, + .accessState = TSDB_VN_ALL_ACCCESS, + }; SSdbRaw *pRaw = mndAcctActionEncode(&acctObj); if (pRaw == NULL) return -1; sdbSetRawStatus(pRaw, SDB_STATUS_READY); mDebug("acct:%s, will be created while deploy sdb, raw:%p", acctObj.acct, pRaw); +#if 0 return sdbWrite(pMnode->pSdb, pRaw); +#else + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_ACCT, NULL); + if (pTrans == NULL) { + mError("acct:%s, failed to create since %s", acctObj.acct, terrstr()); + return -1; + } + mDebug("trans:%d, used to create acct:%s", pTrans->id, acctObj.acct); + + if (mndTransAppendCommitlog(pTrans, pRaw) != 0) { + mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); + mndTransDrop(pTrans); + return -1; + } + sdbSetRawStatus(pRaw, SDB_STATUS_READY); + + if (mndTransPrepare(pMnode, pTrans) != 0) { + mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); + mndTransDrop(pTrans); + return -1; + } + + mndTransDrop(pTrans); + return 0; +#endif } static SSdbRaw *mndAcctActionEncode(SAcctObj *pAcct) { diff --git a/source/dnode/mnode/impl/src/mndCluster.c b/source/dnode/mnode/impl/src/mndCluster.c index f6f6813b97ece46b82428c02df24d8132cf9b697..a421be5c062a709bdd1e74f583a95142da2aac82 100644 --- a/source/dnode/mnode/impl/src/mndCluster.c +++ b/source/dnode/mnode/impl/src/mndCluster.c @@ -16,6 +16,7 @@ #define _DEFAULT_SOURCE #include "mndCluster.h" #include "mndShow.h" +#include "mndTrans.h" #define CLUSTER_VER_NUMBE 1 #define CLUSTER_RESERVE_SIZE 64 @@ -143,6 +144,7 @@ _OVER: static int32_t mndClusterActionInsert(SSdb *pSdb, SClusterObj *pCluster) { mTrace("cluster:%" PRId64 ", perform insert action, row:%p", pCluster->id, pCluster); + pSdb->pMnode->clusterId = pCluster->id; return 0; } @@ -177,7 +179,32 @@ static int32_t mndCreateDefaultCluster(SMnode *pMnode) { sdbSetRawStatus(pRaw, SDB_STATUS_READY); mDebug("cluster:%" PRId64 ", will be created while deploy sdb, raw:%p", clusterObj.id, pRaw); +#if 0 return sdbWrite(pMnode->pSdb, pRaw); +#else + STrans *pTrans =
mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_CLUSTER, NULL); + if (pTrans == NULL) { + mError("cluster:%" PRId64 ", failed to create since %s", clusterObj.id, terrstr()); + return -1; + } + mDebug("trans:%d, used to create cluster:%" PRId64, pTrans->id, clusterObj.id); + + if (mndTransAppendCommitlog(pTrans, pRaw) != 0) { + mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); + mndTransDrop(pTrans); + return -1; + } + sdbSetRawStatus(pRaw, SDB_STATUS_READY); + + if (mndTransPrepare(pMnode, pTrans) != 0) { + mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); + mndTransDrop(pTrans); + return -1; + } + + mndTransDrop(pTrans); + return 0; +#endif } static int32_t mndRetrieveClusters(SRpcMsg *pMsg, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) { diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index 8b2799833b8acbfd658fb4e6bc034af92d6e415e..7cebeb35f5bb9e3f2b363c438a1ce70ad3296717 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -419,7 +419,9 @@ static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { SMqTopicObj topicObj = {0}; memcpy(&topicObj, pTopic, sizeof(SMqTopicObj)); topicObj.refConsumerCnt = pTopic->refConsumerCnt + 1; - if (mndSetTopicRedoLogs(pMnode, pTrans, &topicObj) != 0) goto SUBSCRIBE_OVER; + mInfo("subscribe topic %s by consumer %" PRId64 " cgroup %s, refcnt %d", pTopic->name, consumerId, cgroup, + topicObj.refConsumerCnt); + if (mndSetTopicCommitLogs(pMnode, pTrans, &topicObj) != 0) goto SUBSCRIBE_OVER; mndReleaseTopic(pMnode, pTopic); } diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index 6921235f8bfdb007811dc92ccff0514a68999824..95d3383ee10e378c4c5a66e9d16de4fda90db9ed 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -1155,7 +1155,7 @@ static void mndBuildDBVgroupInfo(SDbObj *pDb, SMnode *pMnode, SArray *pVgList) { pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup); if (pIter == NULL) break; - if (NULL == pDb || pVgroup->dbUid == pDb->uid) { + if ((NULL == pDb || pVgroup->dbUid == pDb->uid) && !pVgroup->isTsma) { SVgroupInfo vgInfo = {0}; vgInfo.vgId = pVgroup->vgId; vgInfo.hashBegin = pVgroup->hashBegin; diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index 01ff08cef9a8ebfd681038aea915fa7906d5ed12..22f858c60bdbfd56652570195b89cbf3f207651a 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -58,14 +58,16 @@ static int32_t mndRetrieveDnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB static void mndCancelGetNextDnode(SMnode *pMnode, void *pIter); int32_t mndInitDnode(SMnode *pMnode) { - SSdbTable table = {.sdbType = SDB_DNODE, - .keyType = SDB_KEY_INT32, - .deployFp = (SdbDeployFp)mndCreateDefaultDnode, - .encodeFp = (SdbEncodeFp)mndDnodeActionEncode, - .decodeFp = (SdbDecodeFp)mndDnodeActionDecode, - .insertFp = (SdbInsertFp)mndDnodeActionInsert, - .updateFp = (SdbUpdateFp)mndDnodeActionUpdate, - .deleteFp = (SdbDeleteFp)mndDnodeActionDelete}; + SSdbTable table = { + .sdbType = SDB_DNODE, + .keyType = SDB_KEY_INT32, + .deployFp = (SdbDeployFp)mndCreateDefaultDnode, + .encodeFp = (SdbEncodeFp)mndDnodeActionEncode, + .decodeFp = (SdbDecodeFp)mndDnodeActionDecode, + .insertFp = (SdbInsertFp)mndDnodeActionInsert, + .updateFp = (SdbUpdateFp)mndDnodeActionUpdate, + .deleteFp = (SdbDeleteFp)mndDnodeActionDelete, + };
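The mndAcct.c and mndCluster.c hunks above (and the matching mndDnode.c and mndMnode.c hunks below) all replace a direct sdbWrite() of the bootstrap record with the same commit-log transaction, keeping the old call behind #if 0 for reference. A minimal sketch of the shared shape, factored into a hypothetical helper (mndCreateDefaultObj is not in this patch; the TRN_TYPE_* argument and the per-object error logging are the only parts that differ between the four call sites):

    // sketch only: the patch repeats this sequence inline at each call site
    static int32_t mndCreateDefaultObj(SMnode *pMnode, SSdbRaw *pRaw, ETrnType type) {
      STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, type, NULL);
      if (pTrans == NULL) return -1;

      // a commit log rather than a redo log: the raw row reaches sdb only when
      // the transaction commits, so a failed deploy leaves no partial record
      if (mndTransAppendCommitlog(pTrans, pRaw) != 0) {
        mndTransDrop(pTrans);
        return -1;
      }
      sdbSetRawStatus(pRaw, SDB_STATUS_READY);

      if (mndTransPrepare(pMnode, pTrans) != 0) {
        mndTransDrop(pTrans);
        return -1;
      }

      mndTransDrop(pTrans);
      return 0;
    }

Routing the bootstrap writes through mndTransPrepare() also sends them down the same sync-proposal path as ordinary transactions, which lines up with the new mndSyncPropose(pMnode, pRaw, transId) signature introduced in mndSync.h above.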
mndSetMsgHandle(pMnode, TDMT_MND_CREATE_DNODE, mndProcessCreateDnodeReq); mndSetMsgHandle(pMnode, TDMT_MND_DROP_DNODE, mndProcessDropDnodeReq); @@ -90,13 +92,40 @@ static int32_t mndCreateDefaultDnode(SMnode *pMnode) { dnodeObj.updateTime = dnodeObj.createdTime; dnodeObj.port = pMnode->replicas[0].port; memcpy(&dnodeObj.fqdn, pMnode->replicas[0].fqdn, TSDB_FQDN_LEN); + snprintf(dnodeObj.ep, TSDB_EP_LEN, "%s:%u", dnodeObj.fqdn, dnodeObj.port); SSdbRaw *pRaw = mndDnodeActionEncode(&dnodeObj); if (pRaw == NULL) return -1; if (sdbSetRawStatus(pRaw, SDB_STATUS_READY) != 0) return -1; mDebug("dnode:%d, will be created while deploy sdb, raw:%p", dnodeObj.id, pRaw); + +#if 0 return sdbWrite(pMnode->pSdb, pRaw); +#else + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_DNODE, NULL); + if (pTrans == NULL) { + mError("dnode:%s, failed to create since %s", dnodeObj.ep, terrstr()); + return -1; + } + mDebug("trans:%d, used to create dnode:%s", pTrans->id, dnodeObj.ep); + + if (mndTransAppendCommitlog(pTrans, pRaw) != 0) { + mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); + mndTransDrop(pTrans); + return -1; + } + sdbSetRawStatus(pRaw, SDB_STATUS_READY); + + if (mndTransPrepare(pMnode, pTrans) != 0) { + mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); + mndTransDrop(pTrans); + return -1; + } + + mndTransDrop(pTrans); + return 0; +#endif } static SSdbRaw *mndDnodeActionEncode(SDnodeObj *pDnode) { @@ -350,6 +379,15 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) { mndReleaseVgroup(pMnode, pVgroup); } + SMnodeObj *pObj = mndAcquireMnode(pMnode, pDnode->id); + if (pObj != NULL) { + if (pObj->state != statusReq.mload.syncState) { + pObj->state = statusReq.mload.syncState; + pObj->stateStartTime = taosGetTimestampMs(); + } + mndReleaseMnode(pMnode, pObj); + } + int64_t curMs = taosGetTimestampMs(); bool online = mndIsDnodeOnline(pMnode, pDnode, curMs); bool dnodeChanged = (statusReq.dnodeVer != sdbGetTableVer(pMnode->pSdb, SDB_DNODE)); @@ -403,7 +441,7 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) { pDnode->numOfSupportVnodes = statusReq.numOfSupportVnodes; SStatusRsp statusRsp = {0}; - statusRsp.dnodeVer = sdbGetTableVer(pMnode->pSdb, SDB_DNODE); + statusRsp.dnodeVer = sdbGetTableVer(pMnode->pSdb, SDB_DNODE) + sdbGetTableVer(pMnode->pSdb, SDB_MNODE); statusRsp.dnodeCfg.dnodeId = pDnode->id; statusRsp.dnodeCfg.clusterId = pMnode->clusterId; statusRsp.pDnodeEps = taosArrayInit(mndGetDnodeSize(pMnode), sizeof(SDnodeEp)); @@ -448,13 +486,13 @@ static int32_t mndCreateDnode(SMnode *pMnode, SRpcMsg *pReq, SCreateDnodeReq *pC } mDebug("trans:%d, used to create dnode:%s", pTrans->id, dnodeObj.ep); - SSdbRaw *pRedoRaw = mndDnodeActionEncode(&dnodeObj); - if (pRedoRaw == NULL || mndTransAppendRedolog(pTrans, pRedoRaw) != 0) { - mError("trans:%d, failed to append redo log since %s", pTrans->id, terrstr()); + SSdbRaw *pCommitRaw = mndDnodeActionEncode(&dnodeObj); + if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) { + mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); mndTransDrop(pTrans); return -1; } - sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY); + sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); if (mndTransPrepare(pMnode, pTrans) != 0) { mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); @@ -524,13 +562,13 @@ static int32_t mndDropDnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode) { } mDebug("trans:%d, used to drop dnode:%d", pTrans->id, 
pDnode->id); - SSdbRaw *pRedoRaw = mndDnodeActionEncode(pDnode); - if (pRedoRaw == NULL || mndTransAppendRedolog(pTrans, pRedoRaw) != 0) { - mError("trans:%d, failed to append redo log since %s", pTrans->id, terrstr()); + SSdbRaw *pCommitRaw = mndDnodeActionEncode(pDnode); + if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) { + mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); mndTransDrop(pTrans); return -1; } - sdbSetRawStatus(pRedoRaw, SDB_STATUS_DROPPED); + sdbSetRawStatus(pCommitRaw, SDB_STATUS_DROPPED); if (mndTransPrepare(pMnode, pTrans) != 0) { mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); @@ -701,7 +739,7 @@ static int32_t mndRetrieveDnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB colDataAppend(pColInfo, numOfRows, (const char *)&pDnode->id, false); char buf[tListLen(pDnode->ep) + VARSTR_HEADER_SIZE] = {0}; - STR_WITH_MAXSIZE_TO_VARSTR(buf, pDnode->ep, pShow->pMeta->pSchemas[cols].bytes); + STR_WITH_MAXSIZE_TO_VARSTR(buf, pDnode->ep, pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, buf, false); diff --git a/source/dnode/mnode/impl/src/mnode.c b/source/dnode/mnode/impl/src/mndMain.c similarity index 64% rename from source/dnode/mnode/impl/src/mnode.c rename to source/dnode/mnode/impl/src/mndMain.c index 285ae030e1424680f6af999b6554629d8b7a7653..0ac36c20ed3463a5bd9e794c83edab22b1675408 100644 --- a/source/dnode/mnode/impl/src/mnode.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -85,8 +85,7 @@ static void *mndThreadFp(void *param) { while (1) { lastTime++; taosMsleep(100); - if (pMnode->stopped) break; - if (!mndIsMaster(pMnode)) continue; + if (mndGetStop(pMnode)) break; if (lastTime % (tsTransPullupInterval * 10) == 0) { mndPullupTrans(pMnode); @@ -119,7 +118,6 @@ static int32_t mndInitTimer(SMnode *pMnode) { } static void mndCleanupTimer(SMnode *pMnode) { - pMnode->stopped = true; if (taosCheckPthreadValid(pMnode->thread)) { taosThreadJoin(pMnode->thread, NULL); taosThreadClear(&pMnode->thread); @@ -154,8 +152,14 @@ static int32_t mndInitSdb(SMnode *pMnode) { return 0; } -static int32_t mndDeploySdb(SMnode *pMnode) { return sdbDeploy(pMnode->pSdb); } -static int32_t mndReadSdb(SMnode *pMnode) { return sdbReadFile(pMnode->pSdb); } +static int32_t mndOpenSdb(SMnode *pMnode) { + if (!pMnode->deploy) { + return sdbReadFile(pMnode->pSdb); + } else { + // return sdbDeploy(pMnode->pSdb);; + return 0; + } +} static void mndCleanupSdb(SMnode *pMnode) { if (pMnode->pSdb) { @@ -177,7 +181,7 @@ static int32_t mndAllocStep(SMnode *pMnode, char *name, MndInitFp initFp, MndCle return 0; } -static int32_t mndInitSteps(SMnode *pMnode, bool deploy) { +static int32_t mndInitSteps(SMnode *pMnode) { if (mndAllocStep(pMnode, "mnode-sdb", mndInitSdb, mndCleanupSdb) != 0) return -1; if (mndAllocStep(pMnode, "mnode-trans", mndInitTrans, mndCleanupTrans) != 0) return -1; if (mndAllocStep(pMnode, "mnode-cluster", mndInitCluster, mndCleanupCluster) != 0) return -1; @@ -202,11 +206,7 @@ static int32_t mndInitSteps(SMnode *pMnode, bool deploy) { if (mndAllocStep(pMnode, "mnode-perfs", mndInitPerfs, mndCleanupPerfs) != 0) return -1; if (mndAllocStep(pMnode, "mnode-db", mndInitDb, mndCleanupDb) != 0) return -1; if (mndAllocStep(pMnode, "mnode-func", mndInitFunc, mndCleanupFunc) != 0) return -1; - if (deploy) { - if (mndAllocStep(pMnode, "mnode-sdb-deploy", mndDeploySdb, NULL) != 0) return -1; - } else { - if (mndAllocStep(pMnode, "mnode-sdb-read", 
mndReadSdb, NULL) != 0) return -1; - } + if (mndAllocStep(pMnode, "mnode-sdb", mndOpenSdb, NULL) != 0) return -1; if (mndAllocStep(pMnode, "mnode-profile", mndInitProfile, mndCleanupProfile) != 0) return -1; if (mndAllocStep(pMnode, "mnode-show", mndInitShow, mndCleanupShow) != 0) return -1; if (mndAllocStep(pMnode, "mnode-query", mndInitQuery, mndCleanupQuery) != 0) return -1; @@ -263,7 +263,8 @@ static void mndSetOptions(SMnode *pMnode, const SMnodeOpt *pOption) { pMnode->selfIndex = pOption->selfIndex; memcpy(&pMnode->replicas, pOption->replicas, sizeof(SReplica) * TSDB_MAX_REPLICA); pMnode->msgCb = pOption->msgCb; - pMnode->selfId = pOption->replicas[pOption->selfIndex].id; + pMnode->selfDnodeId = pOption->dnodeId; + pMnode->syncMgmt.standby = pOption->standby; } SMnode *mndOpen(const char *path, const SMnodeOpt *pOption) { @@ -280,6 +281,7 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption) { (void)taosParseTime(timestr, &pMnode->checkTime, (int32_t)strlen(timestr), TSDB_TIME_PRECISION_MILLI, 0); mndSetOptions(pMnode, pOption); + pMnode->deploy = pOption->deploy; pMnode->pSteps = taosArrayInit(24, sizeof(SMnodeStep)); if (pMnode->pSteps == NULL) { taosMemoryFree(pMnode); @@ -297,7 +299,7 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption) { return NULL; } - code = mndInitSteps(pMnode, pOption->deploy); + code = mndInitSteps(pMnode); if (code != 0) { code = terrno; mError("failed to open mnode since %s", terrstr()); @@ -315,7 +317,6 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption) { return NULL; } - mndUpdateMnodeRole(pMnode); mDebug("mnode open successfully "); return pMnode; } @@ -330,54 +331,149 @@ void mndClose(SMnode *pMnode) { } } -int32_t mndAlter(SMnode *pMnode, const SMnodeOpt *pOption) { - mDebug("start to alter mnode"); - mDebug("mnode is altered"); - return 0; +int32_t mndStart(SMnode *pMnode) { + mndSyncStart(pMnode); + if (pMnode->deploy) { + if (sdbDeploy(pMnode->pSdb) != 0) { + mError("failed to deploy sdb while start mnode"); + return -1; + } + mndSetRestore(pMnode, true); + } + return mndInitTimer(pMnode); } -int32_t mndStart(SMnode *pMnode) { return mndInitTimer(pMnode); } +void mndStop(SMnode *pMnode) { + mndSetStop(pMnode); + mndSyncStop(pMnode); + mndCleanupTimer(pMnode); +} -void mndStop(SMnode *pMnode) { return mndCleanupTimer(pMnode); } +int32_t mndProcessSyncMsg(SRpcMsg *pMsg) { + SMnode *pMnode = pMsg->info.node; + SSyncMgmt *pMgmt = &pMnode->syncMgmt; + int32_t code = TAOS_SYNC_PROPOSE_OTHER_ERROR; -int32_t mndProcessMsg(SRpcMsg *pMsg) { - SMnode *pMnode = pMsg->info.node; - void *ahandle = pMsg->info.ahandle; - mTrace("msg:%p, will be processed, type:%s app:%p", pMsg, TMSG_INFO(pMsg->msgType), ahandle); + if (!syncEnvIsStart()) { + mError("failed to process sync msg:%p type:%s since syncEnv stop", pMsg, TMSG_INFO(pMsg->msgType)); + return TAOS_SYNC_PROPOSE_OTHER_ERROR; + } - if (IsReq(pMsg)) { - if (!mndIsMaster(pMnode)) { - terrno = TSDB_CODE_APP_NOT_READY; - mDebug("msg:%p, failed to process since %s, app:%p", pMsg, terrstr(), ahandle); - return -1; - } + SSyncNode *pSyncNode = syncNodeAcquire(pMgmt->sync); + if (pSyncNode == NULL) { + mError("failed to process sync msg:%p type:%s since syncNode is null", pMsg, TMSG_INFO(pMsg->msgType)); + return TAOS_SYNC_PROPOSE_OTHER_ERROR; + } - if (pMsg->contLen == 0 || pMsg->pCont == NULL) { - terrno = TSDB_CODE_INVALID_MSG_LEN; - mError("msg:%p, failed to process since %s, app:%p", pMsg, terrstr(), ahandle); - return -1; + if (mndAcquireSyncRef(pMnode) != 0) { + mError("failed to 
process sync msg:%p type:%s since %s", pMsg, TMSG_INFO(pMsg->msgType), terrstr()); + return TAOS_SYNC_PROPOSE_OTHER_ERROR; + } + + char logBuf[512] = {0}; + char *syncNodeStr = sync2SimpleStr(pMgmt->sync); + snprintf(logBuf, sizeof(logBuf), "==mndProcessSyncMsg== msgType:%d, syncNode: %s", pMsg->msgType, syncNodeStr); + syncRpcMsgLog2(logBuf, pMsg); + taosMemoryFree(syncNodeStr); + + if (pMsg->msgType == TDMT_VND_SYNC_TIMEOUT) { + SyncTimeout *pSyncMsg = syncTimeoutFromRpcMsg2(pMsg); + code = syncNodeOnTimeoutCb(pSyncNode, pSyncMsg); + syncTimeoutDestroy(pSyncMsg); + } else if (pMsg->msgType == TDMT_VND_SYNC_PING) { + SyncPing *pSyncMsg = syncPingFromRpcMsg2(pMsg); + code = syncNodeOnPingCb(pSyncNode, pSyncMsg); + syncPingDestroy(pSyncMsg); + } else if (pMsg->msgType == TDMT_VND_SYNC_PING_REPLY) { + SyncPingReply *pSyncMsg = syncPingReplyFromRpcMsg2(pMsg); + code = syncNodeOnPingReplyCb(pSyncNode, pSyncMsg); + syncPingReplyDestroy(pSyncMsg); + } else if (pMsg->msgType == TDMT_VND_SYNC_CLIENT_REQUEST) { + SyncClientRequest *pSyncMsg = syncClientRequestFromRpcMsg2(pMsg); + code = syncNodeOnClientRequestCb(pSyncNode, pSyncMsg); + syncClientRequestDestroy(pSyncMsg); + } else if (pMsg->msgType == TDMT_VND_SYNC_REQUEST_VOTE) { + SyncRequestVote *pSyncMsg = syncRequestVoteFromRpcMsg2(pMsg); + code = syncNodeOnRequestVoteCb(pSyncNode, pSyncMsg); + syncRequestVoteDestroy(pSyncMsg); + } else if (pMsg->msgType == TDMT_VND_SYNC_REQUEST_VOTE_REPLY) { + SyncRequestVoteReply *pSyncMsg = syncRequestVoteReplyFromRpcMsg2(pMsg); + code = syncNodeOnRequestVoteReplyCb(pSyncNode, pSyncMsg); + syncRequestVoteReplyDestroy(pSyncMsg); + } else if (pMsg->msgType == TDMT_VND_SYNC_APPEND_ENTRIES) { + SyncAppendEntries *pSyncMsg = syncAppendEntriesFromRpcMsg2(pMsg); + code = syncNodeOnAppendEntriesCb(pSyncNode, pSyncMsg); + syncAppendEntriesDestroy(pSyncMsg); + } else if (pMsg->msgType == TDMT_VND_SYNC_APPEND_ENTRIES_REPLY) { + SyncAppendEntriesReply *pSyncMsg = syncAppendEntriesReplyFromRpcMsg2(pMsg); + code = syncNodeOnAppendEntriesReplyCb(pSyncNode, pSyncMsg); + syncAppendEntriesReplyDestroy(pSyncMsg); + } else { + mError("failed to process msg:%p since invalid type:%s", pMsg, TMSG_INFO(pMsg->msgType)); + code = TAOS_SYNC_PROPOSE_OTHER_ERROR; + } + + mndReleaseSyncRef(pMnode); + return code; +} + +static int32_t mndCheckMnodeState(SRpcMsg *pMsg) { + if (mndAcquireRpcRef(pMsg->info.node) == 0) return 0; + + if (IsReq(pMsg) && pMsg->msgType != TDMT_MND_MQ_TIMER && pMsg->msgType != TDMT_MND_TELEM_TIMER && + pMsg->msgType != TDMT_MND_TRANS_TIMER) { + mError("msg:%p, failed to check mnode state since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle, + TMSG_INFO(pMsg->msgType)); + + SEpSet epSet = {0}; + mndGetMnodeEpSet(pMsg->info.node, &epSet); + + int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet); + pMsg->info.rsp = rpcMallocCont(contLen); + if (pMsg->info.rsp != NULL) { + tSerializeSEpSet(pMsg->info.rsp, contLen, &epSet); + pMsg->info.rspLen = contLen; + terrno = TSDB_CODE_RPC_REDIRECT; + } else { + terrno = TSDB_CODE_OUT_OF_MEMORY; } } + return -1; +} + +static int32_t mndCheckMsgContent(SRpcMsg *pMsg) { + if (!IsReq(pMsg)) return 0; + if (pMsg->contLen != 0 && pMsg->pCont != NULL) return 0; + + mError("msg:%p, failed to check msg content, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType)); + terrno = TSDB_CODE_INVALID_MSG_LEN; + return -1; +} + +int32_t mndProcessRpcMsg(SRpcMsg *pMsg) { + SMnode *pMnode = pMsg->info.node; MndMsgFp fp = pMnode->msgFp[TMSG_INDEX(pMsg->msgType)]; if (fp ==
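/* handler lookup comes before the content and state checks below: unknown
   message types fail fast with TSDB_CODE_MSG_NOT_PROCESSED, and only
   dispatchable requests reach mndCheckMnodeState(), whose success is also
   the rpc-ref acquire paired with the mndReleaseRpcRef() after the call */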
NULL) { + mError("msg:%p, failed to get msg handle, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType)); terrno = TSDB_CODE_MSG_NOT_PROCESSED; - mError("msg:%p, failed to process since no msg handle, app:%p", pMsg, ahandle); return -1; } + if (mndCheckMsgContent(pMsg) != 0) return -1; + if (mndCheckMnodeState(pMsg) != 0) return -1; + + mTrace("msg:%p, start to process in mnode, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType)); int32_t code = (*fp)(pMsg); + mndReleaseRpcRef(pMnode); + if (code == TSDB_CODE_ACTION_IN_PROGRESS) { - terrno = code; - mTrace("msg:%p, in progress, app:%p", pMsg, ahandle); - } else if (code != 0) { - if (terrno != TSDB_CODE_OPS_NOT_SUPPORT) { - mError("msg:%p, failed to process since %s, app:%p", pMsg, terrstr(), ahandle); - } else { - mTrace("msg:%p, failed to process since %s, app:%p", pMsg, terrstr(), ahandle); - } + mTrace("msg:%p, won't response immediately since in progress", pMsg); + } else if (code == 0) { + mTrace("msg:%p, successfully processed and response", pMsg); } else { - mTrace("msg:%p, is processed, app:%p", pMsg, ahandle); + mError("msg:%p, failed to process since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle, + TMSG_INFO(pMsg->msgType)); } return code; @@ -406,7 +502,7 @@ int64_t mndGenerateUid(char *name, int32_t len) { int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgroupInfo *pVgroupInfo, SMonGrantInfo *pGrantInfo) { - if (!mndIsMaster(pMnode)) return -1; + if (mndAcquireRpcRef(pMnode) != 0) return -1; SSdb *pSdb = pMnode->pSdb; int64_t ms = taosGetTimestampMs(); @@ -415,6 +511,7 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr pClusterInfo->mnodes = taosArrayInit(sdbGetSize(pSdb, SDB_MNODE), sizeof(SMonMnodeDesc)); pVgroupInfo->vgroups = taosArrayInit(sdbGetSize(pSdb, SDB_VGROUP), sizeof(SMonVgroupDesc)); if (pClusterInfo->dnodes == NULL || pClusterInfo->mnodes == NULL || pVgroupInfo->vgroups == NULL) { + mndReleaseRpcRef(pMnode); return -1; } @@ -450,15 +547,17 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr SMonMnodeDesc desc = {0}; desc.mnode_id = pObj->id; tstrncpy(desc.mnode_ep, pObj->pDnode->ep, sizeof(desc.mnode_ep)); - tstrncpy(desc.role, syncStr(pObj->role), sizeof(desc.role)); - taosArrayPush(pClusterInfo->mnodes, &desc); - sdbRelease(pSdb, pObj); - if (pObj->role == TAOS_SYNC_STATE_LEADER) { + if (pObj->id == pMnode->selfDnodeId) { pClusterInfo->first_ep_dnode_id = pObj->id; tstrncpy(pClusterInfo->first_ep, pObj->pDnode->ep, sizeof(pClusterInfo->first_ep)); - pClusterInfo->master_uptime = (ms - pObj->roleTime) / (86400000.0f); + pClusterInfo->master_uptime = (ms - pObj->stateStartTime) / (86400000.0f); + tstrncpy(desc.role, syncStr(TAOS_SYNC_STATE_LEADER), sizeof(desc.role)); + } else { + tstrncpy(desc.role, syncStr(pObj->state), sizeof(desc.role)); } + taosArrayPush(pClusterInfo->mnodes, &desc); + sdbRelease(pSdb, pObj); } // vgroup info @@ -507,10 +606,84 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr pGrantInfo->timeseries_total = INT32_MAX; } + mndReleaseRpcRef(pMnode); return 0; } int32_t mndGetLoad(SMnode *pMnode, SMnodeLoad *pLoad) { - pLoad->syncState = pMnode->syncMgmt.state; + pLoad->syncState = syncGetMyRole(pMnode->syncMgmt.sync); return 0; } + +int32_t mndAcquireRpcRef(SMnode *pMnode) { + int32_t code = 0; + taosThreadRwlockRdlock(&pMnode->lock); + if (pMnode->stopped) { + terrno = TSDB_CODE_APP_NOT_READY; + code = -1; + } else if 
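/* the read lock only fences the stopped/master flags against
   mndSetStop()/mndSetRestore(), which take the write lock; the ref itself
   is an atomic counter, so concurrent acquires proceed in parallel under
   the shared lock */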
(!mndIsMaster(pMnode)) { + code = -1; + } else { + int32_t ref = atomic_add_fetch_32(&pMnode->rpcRef, 1); + mTrace("mnode rpc is acquired, ref:%d", ref); + } + taosThreadRwlockUnlock(&pMnode->lock); + return code; +} + +void mndReleaseRpcRef(SMnode *pMnode) { + taosThreadRwlockRdlock(&pMnode->lock); + int32_t ref = atomic_sub_fetch_32(&pMnode->rpcRef, 1); + mTrace("mnode rpc is released, ref:%d", ref); + taosThreadRwlockUnlock(&pMnode->lock); +} + +void mndSetRestore(SMnode *pMnode, bool restored) { + if (restored) { + taosThreadRwlockWrlock(&pMnode->lock); + pMnode->restored = true; + taosThreadRwlockUnlock(&pMnode->lock); + mTrace("mnode set restored:%d", restored); + } else { + taosThreadRwlockWrlock(&pMnode->lock); + pMnode->restored = false; + taosThreadRwlockUnlock(&pMnode->lock); + mTrace("mnode set restored:%d", restored); + while (1) { + if (pMnode->rpcRef <= 0) break; + taosMsleep(3); + } + } +} + +bool mndGetRestored(SMnode *pMnode) { return pMnode->restored; } + +void mndSetStop(SMnode *pMnode) { + taosThreadRwlockWrlock(&pMnode->lock); + pMnode->stopped = true; + taosThreadRwlockUnlock(&pMnode->lock); + mTrace("mnode set stopped"); +} + +bool mndGetStop(SMnode *pMnode) { return pMnode->stopped; } + +int32_t mndAcquireSyncRef(SMnode *pMnode) { + int32_t code = 0; + taosThreadRwlockRdlock(&pMnode->lock); + if (pMnode->stopped) { + terrno = TSDB_CODE_APP_NOT_READY; + code = -1; + } else { + int32_t ref = atomic_add_fetch_32(&pMnode->syncRef, 1); + mTrace("mnode sync is acquired, ref:%d", ref); + } + taosThreadRwlockUnlock(&pMnode->lock); + return code; +} + +void mndReleaseSyncRef(SMnode *pMnode) { + taosThreadRwlockRdlock(&pMnode->lock); + int32_t ref = atomic_sub_fetch_32(&pMnode->syncRef, 1); + mTrace("mnode sync is released, ref:%d", ref); + taosThreadRwlockUnlock(&pMnode->lock); +} \ No newline at end of file diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c index 7f86eb8b3292508c9b903c68fd6306b766ac074f..23634be77b3ca6f21f31019275353de12ab6c83b 100644 --- a/source/dnode/mnode/impl/src/mndMnode.c +++ b/source/dnode/mnode/impl/src/mndMnode.c @@ -20,6 +20,7 @@ #include "mndShow.h" #include "mndTrans.h" #include "mndUser.h" +#include "mndSync.h" #define MNODE_VER_NUMBER 1 #define MNODE_RESERVE_SIZE 64 @@ -31,6 +32,7 @@ static int32_t mndMnodeActionInsert(SSdb *pSdb, SMnodeObj *pObj); static int32_t mndMnodeActionDelete(SSdb *pSdb, SMnodeObj *pObj); static int32_t mndMnodeActionUpdate(SSdb *pSdb, SMnodeObj *pOld, SMnodeObj *pNew); static int32_t mndProcessCreateMnodeReq(SRpcMsg *pReq); +static int32_t mndProcessAlterMnodeReq(SRpcMsg *pReq); static int32_t mndProcessDropMnodeReq(SRpcMsg *pReq); static int32_t mndProcessCreateMnodeRsp(SRpcMsg *pRsp); static int32_t mndProcessAlterMnodeRsp(SRpcMsg *pRsp); @@ -39,16 +41,19 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p static void mndCancelGetNextMnode(SMnode *pMnode, void *pIter); int32_t mndInitMnode(SMnode *pMnode) { - SSdbTable table = {.sdbType = SDB_MNODE, - .keyType = SDB_KEY_INT32, - .deployFp = (SdbDeployFp)mndCreateDefaultMnode, - .encodeFp = (SdbEncodeFp)mndMnodeActionEncode, - .decodeFp = (SdbDecodeFp)mndMnodeActionDecode, - .insertFp = (SdbInsertFp)mndMnodeActionInsert, - .updateFp = (SdbUpdateFp)mndMnodeActionUpdate, - .deleteFp = (SdbDeleteFp)mndMnodeActionDelete}; + SSdbTable table = { + .sdbType = SDB_MNODE, + .keyType = SDB_KEY_INT32, + .deployFp = (SdbDeployFp)mndCreateDefaultMnode, + .encodeFp = (SdbEncodeFp)mndMnodeActionEncode, + 
.decodeFp = (SdbDecodeFp)mndMnodeActionDecode, + .insertFp = (SdbInsertFp)mndMnodeActionInsert, + .updateFp = (SdbUpdateFp)mndMnodeActionUpdate, + .deleteFp = (SdbDeleteFp)mndMnodeActionDelete, + }; mndSetMsgHandle(pMnode, TDMT_MND_CREATE_MNODE, mndProcessCreateMnodeReq); + mndSetMsgHandle(pMnode, TDMT_DND_ALTER_MNODE, mndProcessAlterMnodeReq); mndSetMsgHandle(pMnode, TDMT_MND_DROP_MNODE, mndProcessDropMnodeReq); mndSetMsgHandle(pMnode, TDMT_DND_CREATE_MNODE_RSP, mndProcessCreateMnodeRsp); mndSetMsgHandle(pMnode, TDMT_DND_ALTER_MNODE_RSP, mndProcessAlterMnodeRsp); @@ -75,28 +80,6 @@ void mndReleaseMnode(SMnode *pMnode, SMnodeObj *pObj) { sdbRelease(pMnode->pSdb, pObj); } -void mndUpdateMnodeRole(SMnode *pMnode) { - SSdb *pSdb = pMnode->pSdb; - void *pIter = NULL; - while (1) { - SMnodeObj *pObj = NULL; - pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pObj); - if (pIter == NULL) break; - - ESyncState lastRole = pObj->role; - if (pObj->id == 1) { - pObj->role = TAOS_SYNC_STATE_LEADER; - } else { - pObj->role = TAOS_SYNC_STATE_CANDIDATE; - } - if (pObj->role != lastRole) { - pObj->roleTime = taosGetTimestampMs(); - } - - sdbRelease(pSdb, pObj); - } -} - static int32_t mndCreateDefaultMnode(SMnode *pMnode) { SMnodeObj mnodeObj = {0}; mnodeObj.id = 1; @@ -108,7 +91,33 @@ static int32_t mndCreateDefaultMnode(SMnode *pMnode) { sdbSetRawStatus(pRaw, SDB_STATUS_READY); mDebug("mnode:%d, will be created while deploy sdb, raw:%p", mnodeObj.id, pRaw); + +#if 0 return sdbWrite(pMnode->pSdb, pRaw); +#else + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_MNODE, NULL); + if (pTrans == NULL) { + mError("mnode:%d, failed to create since %s", mnodeObj.id, terrstr()); + return -1; + } + mDebug("trans:%d, used to create mnode:%d", pTrans->id, mnodeObj.id); + + if (mndTransAppendCommitlog(pTrans, pRaw) != 0) { + mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); + mndTransDrop(pTrans); + return -1; + } + sdbSetRawStatus(pRaw, SDB_STATUS_READY); + + if (mndTransPrepare(pMnode, pTrans) != 0) { + mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); + mndTransDrop(pTrans); + return -1; + } + + mndTransDrop(pTrans); + return 0; +#endif } static SSdbRaw *mndMnodeActionEncode(SMnodeObj *pObj) { @@ -181,7 +190,7 @@ static int32_t mndMnodeActionInsert(SSdb *pSdb, SMnodeObj *pObj) { return -1; } - pObj->role = TAOS_SYNC_STATE_FOLLOWER; + pObj->state = TAOS_SYNC_STATE_ERROR; return 0; } @@ -214,23 +223,24 @@ bool mndIsMnode(SMnode *pMnode, int32_t dnodeId) { } void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) { - SSdb *pSdb = pMnode->pSdb; - pEpSet->numOfEps = 0; + SSdb *pSdb = pMnode->pSdb; + int32_t totalMnodes = sdbGetSize(pSdb, SDB_MNODE); + void *pIter = NULL; - void *pIter = NULL; while (1) { SMnodeObj *pObj = NULL; pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pObj); if (pIter == NULL) break; - if (pObj->pDnode == NULL) { - mError("mnode:%d, no corresponding dnode exists", pObj->id); - } else { - if (pObj->role == TAOS_SYNC_STATE_LEADER) { + + if (pObj->id == pMnode->selfDnodeId) { + if (mndIsMaster(pMnode)) { pEpSet->inUse = pEpSet->numOfEps; + } else { + pEpSet->inUse = (pEpSet->numOfEps + 1) % totalMnodes; } - addEpIntoEpSet(pEpSet, pObj->pDnode->fqdn, pObj->pDnode->port); - sdbRelease(pSdb, pObj); } + addEpIntoEpSet(pEpSet, pObj->pDnode->fqdn, pObj->pDnode->port); + sdbRelease(pSdb, pObj); } } @@ -259,75 +269,83 @@ } static int32_t
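/* builds two ordered redo actions: a single-replica TDMT_DND_CREATE_MNODE
   sent only to the new dnode, then a TDMT_DND_ALTER_MNODE carrying the full
   replica set (leader marked via epSet.inUse); the mndTransSetExecOneByOne()
   call in mndCreateMnode() below runs the actions serially, TRN_EXEC_ONE_BY_ONE
   rather than TRN_EXEC_PARALLEL */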
mndSetCreateMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDnodeObj *pDnode, SMnodeObj *pObj) { - SSdb *pSdb = pMnode->pSdb; - void *pIter = NULL; - int32_t numOfReplicas = 0; - + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; + int32_t numOfReplicas = 0; + SDAlterMnodeReq alterReq = {0}; SDCreateMnodeReq createReq = {0}; + SEpSet alterEpset = {0}; + SEpSet createEpset = {0}; + while (1) { SMnodeObj *pMObj = NULL; pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pMObj); if (pIter == NULL) break; - SReplica *pReplica = &createReq.replicas[numOfReplicas]; - pReplica->id = pMObj->id; - pReplica->port = pMObj->pDnode->port; - memcpy(pReplica->fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN); - numOfReplicas++; + alterReq.replicas[numOfReplicas].id = pMObj->id; + alterReq.replicas[numOfReplicas].port = pMObj->pDnode->port; + memcpy(alterReq.replicas[numOfReplicas].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN); + + alterEpset.eps[numOfReplicas].port = pMObj->pDnode->port; + memcpy(alterEpset.eps[numOfReplicas].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN); + if (pMObj->state == TAOS_SYNC_STATE_LEADER) { + alterEpset.inUse = numOfReplicas; + } + numOfReplicas++; sdbRelease(pSdb, pMObj); } - SReplica *pReplica = &createReq.replicas[numOfReplicas]; - pReplica->id = pDnode->id; - pReplica->port = pDnode->port; - memcpy(pReplica->fqdn, pDnode->fqdn, TSDB_FQDN_LEN); - numOfReplicas++; + alterReq.replica = numOfReplicas + 1; + alterReq.replicas[numOfReplicas].id = pDnode->id; + alterReq.replicas[numOfReplicas].port = pDnode->port; + memcpy(alterReq.replicas[numOfReplicas].fqdn, pDnode->fqdn, TSDB_FQDN_LEN); - createReq.replica = numOfReplicas; + alterEpset.numOfEps = numOfReplicas + 1; + alterEpset.eps[numOfReplicas].port = pDnode->port; + memcpy(alterEpset.eps[numOfReplicas].fqdn, pDnode->fqdn, TSDB_FQDN_LEN); - while (1) { - SMnodeObj *pMObj = NULL; - pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pMObj); - if (pIter == NULL) break; + createReq.replica = 1; + createReq.replicas[0].id = pDnode->id; + createReq.replicas[0].port = pDnode->port; + memcpy(createReq.replicas[0].fqdn, pDnode->fqdn, TSDB_FQDN_LEN); - STransAction action = {0}; + createEpset.numOfEps = 1; + createEpset.eps[0].port = pDnode->port; + memcpy(createEpset.eps[0].fqdn, pDnode->fqdn, TSDB_FQDN_LEN); - createReq.dnodeId = pMObj->id; + { int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, &createReq); void *pReq = taosMemoryMalloc(contLen); tSerializeSDCreateMnodeReq(pReq, contLen, &createReq); - action.epSet = mndGetDnodeEpset(pMObj->pDnode); - action.pCont = pReq; - action.contLen = contLen; - action.msgType = TDMT_DND_ALTER_MNODE; - action.acceptableCode = TSDB_CODE_NODE_ALREADY_DEPLOYED; + STransAction action = { + .epSet = createEpset, + .pCont = pReq, + .contLen = contLen, + .msgType = TDMT_DND_CREATE_MNODE, + .acceptableCode = TSDB_CODE_NODE_ALREADY_DEPLOYED, + }; if (mndTransAppendRedoAction(pTrans, &action) != 0) { taosMemoryFree(pReq); - sdbCancelFetch(pSdb, pIter); - sdbRelease(pSdb, pMObj); return -1; } - - sdbRelease(pSdb, pMObj); } { - STransAction action = {0}; - action.epSet = mndGetDnodeEpset(pDnode); - - createReq.dnodeId = pObj->id; - int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, &createReq); + int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, &alterReq); void *pReq = taosMemoryMalloc(contLen); - tSerializeSDCreateMnodeReq(pReq, contLen, &createReq); + tSerializeSDCreateMnodeReq(pReq, contLen, &alterReq); + + STransAction action = { + .epSet = alterEpset, + .pCont = pReq, + .contLen = contLen, + .msgType = 
TDMT_DND_ALTER_MNODE, + .acceptableCode = 0, + }; - action.epSet = mndGetDnodeEpset(pDnode); - action.pCont = pReq; - action.contLen = contLen; - action.msgType = TDMT_DND_CREATE_MNODE; - action.acceptableCode = TSDB_CODE_NODE_ALREADY_DEPLOYED; if (mndTransAppendRedoAction(pTrans, &action) != 0) { taosMemoryFree(pReq); return -1; @@ -349,6 +367,7 @@ static int32_t mndCreateMnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode, if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to create mnode:%d", pTrans->id, pCreate->dnodeId); + mndTransSetExecOneByOne(pTrans); if (mndSetCreateMnodeRedoLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER; if (mndSetCreateMnodeCommitLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER; if (mndSetCreateMnodeRedoActions(pMnode, pTrans, pDnode, &mnodeObj) != 0) goto _OVER; @@ -433,73 +452,77 @@ static int32_t mndSetDropMnodeCommitLogs(SMnode *pMnode, STrans *pTrans, SMnodeO } static int32_t mndSetDropMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDnodeObj *pDnode, SMnodeObj *pObj) { - SSdb *pSdb = pMnode->pSdb; - void *pIter = NULL; - int32_t numOfReplicas = 0; - + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; + int32_t numOfReplicas = 0; SDAlterMnodeReq alterReq = {0}; + SDDropMnodeReq dropReq = {0}; + SEpSet alterEpset = {0}; + SEpSet dropEpSet = {0}; + while (1) { SMnodeObj *pMObj = NULL; pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pMObj); if (pIter == NULL) break; + if (pMObj->id == pObj->id) { + sdbRelease(pSdb, pMObj); + continue; + } - if (pMObj->id != pObj->id) { - SReplica *pReplica = &alterReq.replicas[numOfReplicas]; - pReplica->id = pMObj->id; - pReplica->port = pMObj->pDnode->port; - memcpy(pReplica->fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN); - numOfReplicas++; + alterReq.replicas[numOfReplicas].id = pMObj->id; + alterReq.replicas[numOfReplicas].port = pMObj->pDnode->port; + memcpy(alterReq.replicas[numOfReplicas].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN); + + alterEpset.eps[numOfReplicas].port = pMObj->pDnode->port; + memcpy(alterEpset.eps[numOfReplicas].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN); + if (pMObj->state == TAOS_SYNC_STATE_LEADER) { + alterEpset.inUse = numOfReplicas; } + numOfReplicas++; sdbRelease(pSdb, pMObj); } alterReq.replica = numOfReplicas; + alterEpset.numOfEps = numOfReplicas; - while (1) { - SMnodeObj *pMObj = NULL; - pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pMObj); - if (pIter == NULL) break; - if (pMObj->id != pObj->id) { - STransAction action = {0}; - - alterReq.dnodeId = pMObj->id; - int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, &alterReq); - void *pReq = taosMemoryMalloc(contLen); - tSerializeSDCreateMnodeReq(pReq, contLen, &alterReq); - - action.epSet = mndGetDnodeEpset(pMObj->pDnode); - action.pCont = pReq; - action.contLen = contLen; - action.msgType = TDMT_DND_ALTER_MNODE; - action.acceptableCode = TSDB_CODE_NODE_ALREADY_DEPLOYED; - - if (mndTransAppendRedoAction(pTrans, &action) != 0) { - taosMemoryFree(pReq); - sdbCancelFetch(pSdb, pIter); - sdbRelease(pSdb, pMObj); - return -1; - } - } + dropReq.dnodeId = pDnode->id; + dropEpSet.numOfEps = 1; + dropEpSet.eps[0].port = pDnode->port; + memcpy(dropEpSet.eps[0].fqdn, pDnode->fqdn, TSDB_FQDN_LEN); - sdbRelease(pSdb, pMObj); + { + int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, &alterReq); + void *pReq = taosMemoryMalloc(contLen); + tSerializeSDCreateMnodeReq(pReq, contLen, &alterReq); + + STransAction action = { + .epSet = alterEpset, + .pCont = pReq, + .contLen = contLen, + .msgType = TDMT_DND_ALTER_MNODE, + .acceptableCode = 0, + 
}; + + if (mndTransAppendRedoAction(pTrans, &action) != 0) { + taosMemoryFree(pReq); + return -1; + } } { - STransAction action = {0}; - action.epSet = mndGetDnodeEpset(pDnode); - - SDDropMnodeReq dropReq = {0}; - dropReq.dnodeId = pObj->id; int32_t contLen = tSerializeSCreateDropMQSBNodeReq(NULL, 0, &dropReq); void *pReq = taosMemoryMalloc(contLen); tSerializeSCreateDropMQSBNodeReq(pReq, contLen, &dropReq); - action.epSet = mndGetDnodeEpset(pDnode); - action.pCont = pReq; - action.contLen = contLen; - action.msgType = TDMT_DND_DROP_MNODE; - action.acceptableCode = TSDB_CODE_NODE_NOT_DEPLOYED; + STransAction action = { + .epSet = dropEpSet, + .pCont = pReq, + .contLen = contLen, + .msgType = TDMT_DND_DROP_MNODE, + .acceptableCode = TSDB_CODE_NODE_NOT_DEPLOYED, + }; + if (mndTransAppendRedoAction(pTrans, &action) != 0) { taosMemoryFree(pReq); return -1; @@ -516,7 +539,7 @@ static int32_t mndDropMnode(SMnode *pMnode, SRpcMsg *pReq, SMnodeObj *pObj) { if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to drop mnode:%d", pTrans->id, pObj->id); - + mndTransSetExecOneByOne(pTrans); if (mndSetDropMnodeRedoLogs(pMnode, pTrans, pObj) != 0) goto _OVER; if (mndSetDropMnodeCommitLogs(pMnode, pTrans, pObj) != 0) goto _OVER; if (mndSetDropMnodeRedoActions(pMnode, pTrans, pObj->pDnode, pObj) != 0) goto _OVER; @@ -553,7 +576,7 @@ static int32_t mndProcessDropMnodeReq(SRpcMsg *pReq) { goto _OVER; } - if (pMnode->selfId == dropReq.dnodeId) { + if (pMnode->selfDnodeId == dropReq.dnodeId) { terrno = TSDB_CODE_MND_CANT_DROP_MASTER; goto _OVER; } @@ -624,16 +647,18 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, b1, false); - const char *roles = syncStr(pObj->role); - char *b2 = taosMemoryCalloc(1, 12 + VARSTR_HEADER_SIZE); + const char *roles = NULL; + if (pObj->id == pMnode->selfDnodeId) { + roles = syncStr(TAOS_SYNC_STATE_LEADER); + } else { + roles = syncStr(pObj->state); + } + char *b2 = taosMemoryCalloc(1, 12 + VARSTR_HEADER_SIZE); STR_WITH_MAXSIZE_TO_VARSTR(b2, roles, pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)b2, false); - pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataAppend(pColInfo, numOfRows, (const char *)&pObj->roleTime, false); - pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)&pObj->createdTime, false); @@ -650,3 +675,49 @@ static void mndCancelGetNextMnode(SMnode *pMnode, void *pIter) { SSdb *pSdb = pMnode->pSdb; sdbCancelFetch(pSdb, pIter); } + +static int32_t mndProcessAlterMnodeReq(SRpcMsg *pReq) { + SMnode *pMnode = pReq->info.node; + SDAlterMnodeReq alterReq = {0}; + + if (tDeserializeSDCreateMnodeReq(pReq->pCont, pReq->contLen, &alterReq) != 0) { + terrno = TSDB_CODE_INVALID_MSG; + return -1; + } + + SSyncCfg cfg = {.replicaNum = alterReq.replica, .myIndex = -1}; + for (int32_t i = 0; i < alterReq.replica; ++i) { + SNodeInfo *pNode = &cfg.nodeInfo[i]; + tstrncpy(pNode->nodeFqdn, alterReq.replicas[i].fqdn, sizeof(pNode->nodeFqdn)); + pNode->nodePort = alterReq.replicas[i].port; + if (alterReq.replicas[i].id == pMnode->selfDnodeId) cfg.myIndex = i; + } + + if (cfg.myIndex == -1) { + mError("failed to alter mnode since myindex is -1"); + return -1; + } else { + mInfo("start to alter mnode sync, replica:%d myindex:%d", cfg.replicaNum, cfg.myIndex); + for (int32_t i = 0; i < alterReq.replica; ++i) { + 
SNodeInfo *pNode = &cfg.nodeInfo[i]; + mInfo("index:%d, fqdn:%s port:%d", i, pNode->nodeFqdn, pNode->nodePort); + } + } + + mTrace("trans:-1, sync reconfig will be proposed"); + + SSyncMgmt *pMgmt = &pMnode->syncMgmt; + pMgmt->standby = 0; + int32_t code = syncReconfig(pMgmt->sync, &cfg); + if (code != 0) { + mError("trans:-1, failed to propose sync reconfig since %s", terrstr()); + return code; + } else { + pMgmt->errCode = 0; + pMgmt->transId = -1; + tsem_wait(&pMgmt->syncSem); + mInfo("alter mnode sync result:%s", tstrerror(pMgmt->errCode)); + terrno = pMgmt->errCode; + return pMgmt->errCode; + } +} diff --git a/source/dnode/mnode/impl/src/mndOffset.c b/source/dnode/mnode/impl/src/mndOffset.c index 6f42d66625aa52d4ce32b7fae3dd5947aea5fb1a..01516d03f28f168b71ea5272bf983c181a059bcd 100644 --- a/source/dnode/mnode/impl/src/mndOffset.c +++ b/source/dnode/mnode/impl/src/mndOffset.c @@ -58,6 +58,12 @@ bool mndOffsetFromTopic(SMqOffsetObj *pOffset, const char *topic) { return false; } +bool mndOffsetFromSubKey(SMqOffsetObj *pOffset, const char *subKey) { + int32_t i = 0; + while (pOffset->key[i] != ':') i++; + if (strcmp(&pOffset->key[i + 1], subKey) == 0) return true; + return false; +} SSdbRaw *mndOffsetActionEncode(SMqOffsetObj *pOffset) { terrno = TSDB_CODE_OUT_OF_MEMORY; void *buf = NULL; @@ -153,6 +159,7 @@ int32_t mndCreateOffsets(STrans *pTrans, const char *cgroup, const char *topicNa return -1; } sdbSetRawStatus(pOffsetRaw, SDB_STATUS_READY); + // commit log or redo log? if (mndTransAppendRedolog(pTrans, pOffsetRaw) < 0) { return -1; } @@ -188,7 +195,7 @@ static int32_t mndProcessCommitOffsetReq(SRpcMsg *pMsg) { pOffsetObj->offset = pOffset->offset; SSdbRaw *pOffsetRaw = mndOffsetActionEncode(pOffsetObj); sdbSetRawStatus(pOffsetRaw, SDB_STATUS_READY); - mndTransAppendRedolog(pTrans, pOffsetRaw); + mndTransAppendCommitlog(pTrans, pOffsetRaw); if (create) { taosMemoryFree(pOffsetObj); } else { @@ -302,7 +309,35 @@ int32_t mndDropOffsetByTopic(SMnode *pMnode, STrans *pTrans, const char *topic) continue; } - if (mndSetDropOffsetRedoLogs(pMnode, pTrans, pOffset) < 0) { + if (mndSetDropOffsetCommitLogs(pMnode, pTrans, pOffset) < 0) { + sdbRelease(pSdb, pOffset); + goto END; + } + + sdbRelease(pSdb, pOffset); + } + + code = 0; +END: + return code; +} + +int32_t mndDropOffsetBySubKey(SMnode *pMnode, STrans *pTrans, const char *subKey) { + int32_t code = -1; + SSdb *pSdb = pMnode->pSdb; + + void *pIter = NULL; + SMqOffsetObj *pOffset = NULL; + while (1) { + pIter = sdbFetch(pSdb, SDB_OFFSET, pIter, (void **)&pOffset); + if (pIter == NULL) break; + + if (!mndOffsetFromSubKey(pOffset, subKey)) { + sdbRelease(pSdb, pOffset); + continue; + } + + if (mndSetDropOffsetCommitLogs(pMnode, pTrans, pOffset) < 0) { sdbRelease(pSdb, pOffset); goto END; } diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index b9ac82d890a12f2355834f415f4bbca65461873b..c9c52af0fe3ef377317530c26648c811d1112c95 100644 --- a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -379,7 +379,7 @@ static int32_t mndProcessQueryHeartBeat(SMnode *pMnode, SRpcMsg *pMsg, SClientHb } rspBasic->connId = pConn->id; - rspBasic->totalDnodes = 1; // TODO + rspBasic->totalDnodes = mndGetDnodeSize(pMnode); rspBasic->onlineDnodes = 1; // TODO mndGetMnodeEpSet(pMnode, &rspBasic->epSet); mndReleaseConn(pMnode, pConn); diff --git a/source/dnode/mnode/impl/src/mndQuery.c b/source/dnode/mnode/impl/src/mndQuery.c index 
78b70c9a74133b859b4175b195d4a939c37ebccc..97594f2b913334ac17e2bd5e6c8fc95e19a03e9e 100644 --- a/source/dnode/mnode/impl/src/mndQuery.c +++ b/source/dnode/mnode/impl/src/mndQuery.c @@ -26,19 +26,19 @@ int32_t mndProcessQueryMsg(SRpcMsg *pMsg) { mTrace("msg:%p, in query queue is processing", pMsg); switch (pMsg->msgType) { case TDMT_VND_QUERY: - code = qWorkerProcessQueryMsg(&handle, pMnode->pQuery, pMsg); + code = qWorkerProcessQueryMsg(&handle, pMnode->pQuery, pMsg, 0); break; case TDMT_VND_QUERY_CONTINUE: - code = qWorkerProcessCQueryMsg(&handle, pMnode->pQuery, pMsg); + code = qWorkerProcessCQueryMsg(&handle, pMnode->pQuery, pMsg, 0); break; case TDMT_VND_FETCH: - code = qWorkerProcessFetchMsg(pMnode, pMnode->pQuery, pMsg); + code = qWorkerProcessFetchMsg(pMnode, pMnode->pQuery, pMsg, 0); break; case TDMT_VND_DROP_TASK: - code = qWorkerProcessDropMsg(pMnode, pMnode->pQuery, pMsg); + code = qWorkerProcessDropMsg(pMnode, pMnode->pQuery, pMsg, 0); break; case TDMT_VND_QUERY_HEARTBEAT: - code = qWorkerProcessHbMsg(pMnode, pMnode->pQuery, pMsg); + code = qWorkerProcessHbMsg(pMnode, pMnode->pQuery, pMsg, 0); break; default: terrno = TSDB_CODE_VND_APP_ERROR; diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c index 22a5f37334b4f18a422249afa9e870068e0e5f83..58b51e4c548106a393b65ab3142064cd0c249481 100644 --- a/source/dnode/mnode/impl/src/mndScheduler.c +++ b/source/dnode/mnode/impl/src/mndScheduler.c @@ -28,13 +28,15 @@ #include "mndTrans.h" #include "mndUser.h" #include "mndVgroup.h" +#include "parser.h" #include "tcompare.h" #include "tname.h" #include "tuuid.h" extern bool tsStreamSchedV; -int32_t mndConvertRSmaTask(const char* ast, int8_t triggerType, int64_t watermark, char** pStr, int32_t* pLen) { +int32_t mndConvertRSmaTask(const char* ast, int64_t uid, int8_t triggerType, int64_t watermark, char** pStr, + int32_t* pLen) { SNode* pAst = NULL; SQueryPlan* pPlan = NULL; terrno = TSDB_CODE_SUCCESS; @@ -44,6 +46,11 @@ int32_t mndConvertRSmaTask(const char* ast, int8_t triggerType, int64_t watermar goto END; } + if (qSetSTableIdForRSma(pAst, uid) < 0) { + terrno = TSDB_CODE_QRY_INVALID_INPUT; + goto END; + } + SPlanContext cxt = { .pAstRoot = pAst, .topicQuery = false, @@ -206,6 +213,7 @@ int32_t mndAddShuffledSinkToStream(SMnode* pMnode, STrans* pTrans, SStreamObj* p } else { pTask->sinkType = TASK_SINK__TABLE; pTask->tbSink.stbUid = pStream->targetStbUid; + memcpy(pTask->tbSink.stbFullName, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN); pTask->tbSink.pSchemaWrapper = tCloneSSchemaWrapper(&pStream->outputSchema); ASSERT(pTask->tbSink.pSchemaWrapper); } @@ -229,11 +237,14 @@ int32_t mndAddFixedSinkToStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStr taosArrayPush(tasks, &pTask); pTask->nodeId = pStream->fixedSinkVgId; +#if 0 SVgObj* pVgroup = mndAcquireVgroup(pMnode, pStream->fixedSinkVgId); if (pVgroup == NULL) { return -1; } pTask->epSet = mndGetVgroupEpset(pMnode, pVgroup); +#endif + pTask->epSet = mndGetVgroupEpset(pMnode, &pStream->fixedSinkVg); // source pTask->sourceType = TASK_SOURCE__MERGE; pTask->inputType = TASK_INPUT_TYPE__DATA_BLOCK; @@ -248,13 +259,15 @@ int32_t mndAddFixedSinkToStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStr } else { pTask->sinkType = TASK_SINK__TABLE; pTask->tbSink.stbUid = pStream->targetStbUid; + memcpy(pTask->tbSink.stbFullName, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN); pTask->tbSink.pSchemaWrapper = tCloneSSchemaWrapper(&pStream->outputSchema); } // dispatch pTask->dispatchType = 
TASK_DISPATCH__NONE; - mndPersistTaskDeployReq(pTrans, pTask, &pTask->epSet, TDMT_VND_TASK_DEPLOY, pVgroup->vgId); + /*mndPersistTaskDeployReq(pTrans, pTask, &pTask->epSet, TDMT_VND_TASK_DEPLOY, pVgroup->vgId);*/ + mndPersistTaskDeployReq(pTrans, pTask, &pTask->epSet, TDMT_VND_TASK_DEPLOY, pStream->fixedSinkVg.vgId); return 0; } @@ -325,6 +338,7 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) { } else { pTask->sinkType = TASK_SINK__TABLE; pTask->tbSink.stbUid = pStream->targetStbUid; + memcpy(pTask->tbSink.stbFullName, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN); pTask->tbSink.pSchemaWrapper = tCloneSSchemaWrapper(&pStream->outputSchema); } #endif @@ -345,7 +359,8 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) { // one merge only ASSERT(taosArrayGetSize(pArray) == 1); SStreamTask* lastLevelTask = taosArrayGetP(pArray, 0); - pTask->dispatchMsgType = TDMT_VND_TASK_MERGE_EXEC; + /*pTask->dispatchMsgType = TDMT_VND_TASK_MERGE_EXEC;*/ + pTask->dispatchMsgType = TDMT_VND_TASK_DISPATCH; pTask->dispatchType = TASK_DISPATCH__FIXED; pTask->fixedEpDispatcher.taskId = lastLevelTask->taskId; @@ -390,7 +405,8 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) { if (pStream->fixedSinkVgId == 0) { pTask->dispatchType = TASK_DISPATCH__SHUFFLE; - pTask->dispatchMsgType = TDMT_VND_TASK_WRITE_EXEC; + /*pTask->dispatchMsgType = TDMT_VND_TASK_WRITE_EXEC;*/ + pTask->dispatchMsgType = TDMT_VND_TASK_DISPATCH; SDbObj* pDb = mndAcquireDb(pMnode, pStream->sourceDb); ASSERT(pDb); if (mndExtractDbInfo(pMnode, pDb, &pTask->shuffleDispatcher.dbInfo, NULL) < 0) { @@ -420,7 +436,8 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) { } } else { pTask->dispatchType = TASK_DISPATCH__FIXED; - pTask->dispatchMsgType = TDMT_VND_TASK_WRITE_EXEC; + /*pTask->dispatchMsgType = TDMT_VND_TASK_WRITE_EXEC;*/ + pTask->dispatchMsgType = TDMT_VND_TASK_DISPATCH; SArray* pArray = taosArrayGetP(pStream->tasks, 0); // one sink only ASSERT(taosArrayGetSize(pArray) == 1); diff --git a/source/dnode/mnode/impl/src/mndShow.c b/source/dnode/mnode/impl/src/mndShow.c index def6c06896149c4f7c871099d1d9dc7166dc2dd1..6b70825ed46bdfffb703d3c508971a22ec145b82 100644 --- a/source/dnode/mnode/impl/src/mndShow.c +++ b/source/dnode/mnode/impl/src/mndShow.c @@ -257,6 +257,7 @@ static int32_t mndProcessRetrieveSysTableReq(SRpcMsg *pReq) { terrno = rowsRead; mDebug("show:0x%" PRIx64 ", retrieve completed", pShow->id); mndReleaseShowObj(pShow, true); + blockDataDestroy(pBlock); return -1; } diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index b38e901d49dee8e6c93dc629824e51bf44c71e0f..7b5d1b6c32eaf802eb0683a385acd85498b8a88d 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -295,9 +295,9 @@ static void *mndBuildVCreateSmaReq(SMnode *pMnode, SVgObj *pVgroup, SSmaObj *pSm } static void *mndBuildVDropSmaReq(SMnode *pMnode, SVgObj *pVgroup, SSmaObj *pSma, int32_t *pContLen) { - SEncoder encoder = {0}; - int32_t contLen; - SName name = {0}; + SEncoder encoder = {0}; + int32_t contLen; + SName name = {0}; tNameFromString(&name, pSma->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); SVDropTSmaReq req = {0}; @@ -354,6 +354,22 @@ static int32_t mndSetCreateSmaCommitLogs(SMnode *pMnode, STrans *pTrans, SSmaObj return 0; } +static int32_t mndSetCreateSmaVgroupRedoLogs(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup) { + SSdbRaw *pVgRaw = 
mndVgroupActionEncode(pVgroup); + if (pVgRaw == NULL) return -1; + if (mndTransAppendRedolog(pTrans, pVgRaw) != 0) return -1; + if (sdbSetRawStatus(pVgRaw, SDB_STATUS_CREATING) != 0) return -1; + return 0; +} + +static int32_t mndSetCreateSmaVgroupCommitLogs(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup) { + SSdbRaw *pVgRaw = mndVgroupActionEncode(pVgroup); + if (pVgRaw == NULL) return -1; + if (mndTransAppendCommitlog(pTrans, pVgRaw) != 0) return -1; + if (sdbSetRawStatus(pVgRaw, SDB_STATUS_READY) != 0) return -1; + return 0; +} + static int32_t mndSetCreateSmaRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SSmaObj *pSma) { SSdb *pSdb = pMnode->pSdb; SVgObj *pVgroup = NULL; @@ -393,6 +409,34 @@ static int32_t mndSetCreateSmaRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj return 0; } +static int32_t mndSetCreateSmaVgroupRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup) { + SVnodeGid *pVgid = pVgroup->vnodeGid + 0; + SDnodeObj *pDnode = mndAcquireDnode(pMnode, pVgid->dnodeId); + if (pDnode == NULL) return -1; + + STransAction action = {0}; + action.epSet = mndGetDnodeEpset(pDnode); + mndReleaseDnode(pMnode, pDnode); + + // todo add sma info here + + int32_t contLen = 0; + void *pReq = mndBuildCreateVnodeReq(pMnode, pDnode, pDb, pVgroup, &contLen); + if (pReq == NULL) return -1; + + action.pCont = pReq; + action.contLen = contLen; + action.msgType = TDMT_DND_CREATE_VNODE; + action.acceptableCode = TSDB_CODE_NODE_ALREADY_DEPLOYED; + + if (mndTransAppendRedoAction(pTrans, &action) != 0) { + taosMemoryFree(pReq); + return -1; + } + + return 0; +} + static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCreate, SDbObj *pDb, SStbObj *pStb) { SSmaObj smaObj = {0}; memcpy(smaObj.name, pCreate->name, TSDB_TABLE_FNAME_LEN); @@ -448,9 +492,14 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea streamObj.version = 1; streamObj.sql = pCreate->sql; streamObj.createdBy = STREAM_CREATED_BY__SMA; - streamObj.fixedSinkVgId = smaObj.dstVgId; streamObj.smaId = smaObj.uid; - /*streamObj.physicalPlan = "";*/ + + if (mndAllocSmaVgroup(pMnode, pDb, &streamObj.fixedSinkVg) != 0) { + mError("sma:%s, failed to create since %s", smaObj.name, terrstr()); + return -1; + } + smaObj.dstVgId = streamObj.fixedSinkVg.vgId; + streamObj.fixedSinkVgId = smaObj.dstVgId; int32_t code = -1; STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_SMA, pReq); @@ -458,10 +507,14 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea mDebug("trans:%d, used to create sma:%s", pTrans->id, pCreate->name); mndTransSetDbInfo(pTrans, pDb); + mndTransSetExecOneByOne(pTrans); if (mndSetCreateSmaRedoLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER; + if (mndSetCreateSmaVgroupRedoLogs(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER; if (mndSetCreateSmaCommitLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER; + if (mndSetCreateSmaVgroupCommitLogs(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER; if (mndSetCreateSmaRedoActions(pMnode, pTrans, pDb, &smaObj) != 0) goto _OVER; + if (mndSetCreateSmaVgroupRedoActions(pMnode, pTrans, pDb, &streamObj.fixedSinkVg) != 0) goto _OVER; if (mndAddStreamToTrans(pMnode, &streamObj, pCreate->ast, STREAM_TRIGGER_AT_ONCE, 0, pTrans) != 0) goto _OVER; if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; @@ -480,7 +533,6 @@ static int32_t mndCheckCreateSmaReq(SMCreateSmaReq *pCreate) { if (pCreate->intervalUnit < 0) return -1; if (pCreate->slidingUnit < 0) return -1; 
if (pCreate->timezone < 0) return -1; - if (pCreate->dstVgId < 0) return -1; if (pCreate->interval < 0) return -1; if (pCreate->offset < 0) return -1; if (pCreate->sliding < 0) return -1; @@ -602,6 +654,24 @@ static int32_t mndSetDropSmaCommitLogs(SMnode *pMnode, STrans *pTrans, SSmaObj * return 0; } +static int32_t mndSetDropSmaVgroupRedoLogs(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup) { + SSdbRaw *pVgRaw = mndVgroupActionEncode(pVgroup); + if (pVgRaw == NULL) return -1; + if (mndTransAppendRedolog(pTrans, pVgRaw) != 0) return -1; + if (sdbSetRawStatus(pVgRaw, SDB_STATUS_DROPPING) != 0) return -1; + + return 0; +} + +static int32_t mndSetDropSmaVgroupCommitLogs(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup) { + SSdbRaw *pVgRaw = mndVgroupActionEncode(pVgroup); + if (pVgRaw == NULL) return -1; + if (mndTransAppendCommitlog(pTrans, pVgRaw) != 0) return -1; + if (sdbSetRawStatus(pVgRaw, SDB_STATUS_DROPPED) != 0) return -1; + + return 0; +} + static int32_t mndSetDropSmaRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SSmaObj *pSma) { SSdb *pSdb = pMnode->pSdb; SVgObj *pVgroup = NULL; @@ -643,23 +713,59 @@ static int32_t mndSetDropSmaRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj * return 0; } +static int32_t mndSetDropSmaVgroupRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup) { + SVnodeGid *pVgid = pVgroup->vnodeGid + 0; + SDnodeObj *pDnode = mndAcquireDnode(pMnode, pVgid->dnodeId); + if (pDnode == NULL) return -1; + + STransAction action = {0}; + action.epSet = mndGetDnodeEpset(pDnode); + mndReleaseDnode(pMnode, pDnode); + + int32_t contLen = 0; + void *pReq = mndBuildDropVnodeReq(pMnode, pDnode, pDb, pVgroup, &contLen); + if (pReq == NULL) return -1; + + action.pCont = pReq; + action.contLen = contLen; + action.msgType = TDMT_DND_DROP_VNODE; + action.acceptableCode = TSDB_CODE_NODE_NOT_DEPLOYED; + + if (mndTransAppendRedoAction(pTrans, &action) != 0) { + taosMemoryFree(pReq); + return -1; + } + + return 0; +} + static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *pSma) { int32_t code = -1; - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_DROP_SMA, pReq); + SVgObj *pVgroup = NULL; + STrans *pTrans = NULL; + + pVgroup = mndAcquireVgroup(pMnode, pSma->dstVgId); + if (pVgroup == NULL) goto _OVER; + + pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_DROP_SMA, pReq); if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to drop sma:%s", pTrans->id, pSma->name); mndTransSetDbInfo(pTrans, pDb); if (mndSetDropSmaRedoLogs(pMnode, pTrans, pSma) != 0) goto _OVER; + if (mndSetDropSmaVgroupRedoLogs(pMnode, pTrans, pVgroup) != 0) goto _OVER; if (mndSetDropSmaCommitLogs(pMnode, pTrans, pSma) != 0) goto _OVER; + if (mndSetDropSmaVgroupCommitLogs(pMnode, pTrans, pVgroup) != 0) goto _OVER; if (mndSetDropSmaRedoActions(pMnode, pTrans, pDb, pSma) != 0) goto _OVER; + if (mndSetDropSmaVgroupRedoActions(pMnode, pTrans, pDb, pVgroup) != 0) goto _OVER; if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; code = 0; _OVER: mndTransDrop(pTrans); + mndReleaseVgroup(pMnode, pVgroup); return code; } @@ -846,6 +952,9 @@ static int32_t mndRetrieveSma(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)n1, false); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataAppend(pColInfo, numOfRows, (const char *)&pSma->dstVgId, false); + numOfRows++; sdbRelease(pSdb, pSma); } diff --git 
a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index f6043615abea753db5e9a0bb7915932522eb7900..b33c09a0f9d0a4740a3b0b9ce9fb06dd5ea878ae 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -87,7 +87,6 @@ SSdbRaw *mndStbActionEncode(SStbObj *pStb) { SDB_SET_INT64(pRaw, dataPos, pStb->updateTime, _OVER) SDB_SET_INT64(pRaw, dataPos, pStb->uid, _OVER) SDB_SET_INT64(pRaw, dataPos, pStb->dbUid, _OVER) - SDB_SET_INT32(pRaw, dataPos, pStb->version, _OVER) SDB_SET_INT32(pRaw, dataPos, pStb->tagVer, _OVER) SDB_SET_INT32(pRaw, dataPos, pStb->colVer, _OVER) SDB_SET_INT32(pRaw, dataPos, pStb->nextColId, _OVER) @@ -167,7 +166,6 @@ static SSdbRow *mndStbActionDecode(SSdbRaw *pRaw) { SDB_GET_INT64(pRaw, dataPos, &pStb->updateTime, _OVER) SDB_GET_INT64(pRaw, dataPos, &pStb->uid, _OVER) SDB_GET_INT64(pRaw, dataPos, &pStb->dbUid, _OVER) - SDB_GET_INT32(pRaw, dataPos, &pStb->version, _OVER) SDB_GET_INT32(pRaw, dataPos, &pStb->tagVer, _OVER) SDB_GET_INT32(pRaw, dataPos, &pStb->colVer, _OVER) SDB_GET_INT32(pRaw, dataPos, &pStb->nextColId, _OVER) @@ -320,7 +318,6 @@ static int32_t mndStbActionUpdate(SSdb *pSdb, SStbObj *pOld, SStbObj *pNew) { } pOld->updateTime = pNew->updateTime; - pOld->version = pNew->version; pOld->tagVer = pNew->tagVer; pOld->colVer = pNew->colVer; pOld->nextColId = pNew->nextColId; @@ -388,25 +385,26 @@ static void *mndBuildVCreateStbReq(SMnode *pMnode, SVgObj *pVgroup, SStbObj *pSt req.name = (char *)tNameGetTableName(&name); req.suid = pStb->uid; req.rollup = pStb->ast1Len > 0 ? 1 : 0; - req.schema.nCols = pStb->numOfColumns; - req.schema.sver = pStb->version; - req.schema.tagVer = pStb->tagVer; - req.schema.colVer = pStb->colVer; - req.schema.pSchema = pStb->pColumns; + // todo + req.schemaRow.nCols = pStb->numOfColumns; + req.schemaRow.version = pStb->colVer; + req.schemaRow.pSchema = pStb->pColumns; req.schemaTag.nCols = pStb->numOfTags; - req.schemaTag.sver = 1; + req.schemaTag.version = pStb->tagVer; req.schemaTag.pSchema = pStb->pTags; if (req.rollup) { req.pRSmaParam.xFilesFactor = pStb->xFilesFactor; req.pRSmaParam.delay = pStb->delay; if (pStb->ast1Len > 0) { - if (mndConvertRSmaTask(pStb->pAst1, 0, 0, &req.pRSmaParam.qmsg1, &req.pRSmaParam.qmsg1Len) != TSDB_CODE_SUCCESS) { + if (mndConvertRSmaTask(pStb->pAst1, pStb->uid, 0, 0, &req.pRSmaParam.qmsg1, &req.pRSmaParam.qmsg1Len) != + TSDB_CODE_SUCCESS) { return NULL; } } if (pStb->ast2Len > 0) { - if (mndConvertRSmaTask(pStb->pAst2, 0, 0, &req.pRSmaParam.qmsg2, &req.pRSmaParam.qmsg2Len) != TSDB_CODE_SUCCESS) { + if (mndConvertRSmaTask(pStb->pAst2, pStb->uid, 0, 0, &req.pRSmaParam.qmsg2, &req.pRSmaParam.qmsg2Len) != + TSDB_CODE_SUCCESS) { return NULL; } } @@ -664,7 +662,6 @@ int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreat pDst->updateTime = pDst->createdTime; pDst->uid = mndGenerateUid(pCreate->name, TSDB_TABLE_FNAME_LEN); pDst->dbUid = pDb->uid; - pDst->version = 1; pDst->tagVer = 1; pDst->colVer = 1; pDst->nextColId = 1; @@ -743,9 +740,7 @@ static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCrea mDebug("trans:%d, used to create stb:%s", pTrans->id, pCreate->name); - if (mndBuildStbFromReq(pMnode, &stbObj, pCreate, pDb) != 0) { - goto _OVER; - } + if (mndBuildStbFromReq(pMnode, &stbObj, pCreate, pDb) != 0) goto _OVER; if (mndAddStbToTrans(pMnode, pTrans, pDb, &stbObj) < 0) goto _OVER; @@ -958,7 +953,6 @@ static int32_t mndAddSuperTableTag(const SStbObj *pOld, SStbObj *pNew, SArray *p mDebug("stb:%s, 
start to add tag %s", pNew->name, pSchema->name); } - pNew->version++; pNew->tagVer++; return 0; } @@ -977,7 +971,6 @@ static int32_t mndDropSuperTableTag(const SStbObj *pOld, SStbObj *pNew, const ch memmove(pNew->pTags + tag, pNew->pTags + tag + 1, sizeof(SSchema) * (pNew->numOfTags - tag - 1)); pNew->numOfTags--; - pNew->version++; pNew->tagVer++; mDebug("stb:%s, start to drop tag %s", pNew->name, tagName); return 0; @@ -1018,7 +1011,6 @@ static int32_t mndAlterStbTagName(const SStbObj *pOld, SStbObj *pNew, SArray *pF SSchema *pSchema = (SSchema *)(pNew->pTags + tag); memcpy(pSchema->name, newTagName, TSDB_COL_NAME_LEN); - pNew->version++; pNew->tagVer++; mDebug("stb:%s, start to modify tag %s to %s", pNew->name, oldTagName, newTagName); return 0; @@ -1048,7 +1040,6 @@ static int32_t mndAlterStbTagBytes(const SStbObj *pOld, SStbObj *pNew, const SFi } pTag->bytes = pField->bytes; - pNew->version++; pNew->tagVer++; mDebug("stb:%s, start to modify tag len %s to %d", pNew->name, pField->name, pField->bytes); @@ -1088,7 +1079,6 @@ static int32_t mndAddSuperTableColumn(const SStbObj *pOld, SStbObj *pNew, SArray mDebug("stb:%s, start to add column %s", pNew->name, pSchema->name); } - pNew->version++; pNew->colVer++; return 0; } @@ -1117,7 +1107,6 @@ static int32_t mndDropSuperTableColumn(const SStbObj *pOld, SStbObj *pNew, const memmove(pNew->pColumns + col, pNew->pColumns + col + 1, sizeof(SSchema) * (pNew->numOfColumns - col - 1)); pNew->numOfColumns--; - pNew->version++; pNew->colVer++; mDebug("stb:%s, start to drop col %s", pNew->name, colName); return 0; @@ -1156,7 +1145,6 @@ static int32_t mndAlterStbColumnBytes(const SStbObj *pOld, SStbObj *pNew, const } pCol->bytes = pField->bytes; - pNew->version++; pNew->colVer++; mDebug("stb:%s, start to modify col len %s to %d", pNew->name, pField->name, pField->bytes); @@ -1317,9 +1305,10 @@ static int32_t mndProcessMAlterStbReq(SRpcMsg *pReq) { goto _OVER; } - if (alterReq.verInBlock > 0 && alterReq.verInBlock <= pStb->version) { - mDebug("stb:%s, already exist, verInBlock:%d smaller than verInStb:%d, alter success", alterReq.name, - alterReq.verInBlock, pStb->version); + if ((alterReq.tagVer > 0 && alterReq.colVer > 0) && + (alterReq.tagVer <= pStb->tagVer || alterReq.colVer <= pStb->colVer)) { + mDebug("stb:%s, already exist, tagVer:%d colVer:%d smaller than in mnode, tagVer:%d colVer:%d, alter success", + alterReq.name, alterReq.tagVer, alterReq.colVer, pStb->tagVer, pStb->colVer); code = 0; goto _OVER; } @@ -1513,7 +1502,8 @@ static int32_t mndBuildStbSchemaImp(SDbObj *pDb, SStbObj *pStb, const char *tbNa pRsp->numOfColumns = pStb->numOfColumns; pRsp->precision = pDb->cfg.precision; pRsp->tableType = TSDB_SUPER_TABLE; - pRsp->sversion = pStb->version; + pRsp->sversion = pStb->colVer; + pRsp->tversion = pStb->tagVer; pRsp->suid = pStb->uid; pRsp->tuid = pStb->uid; @@ -1640,7 +1630,7 @@ int32_t mndValidateStbInfo(SMnode *pMnode, SSTableMetaVersion *pStbVersions, int metaRsp.suid = pStbVersion->suid; } - if (pStbVersion->sversion != metaRsp.sversion) { + if (pStbVersion->sversion != metaRsp.sversion || pStbVersion->tversion != metaRsp.tversion) { taosArrayPush(batchMetaRsp.pArray, &metaRsp); } else { tFreeSTableMetaRsp(&metaRsp); diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 7b6383a4704760a83e3520bdacbfb7b28efc7bd1..cbef1facdcd5c1a680c90b3f11936316e12a2a4f 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -279,13 +279,13 @@ int32_t 
mndAddStreamToTrans(SMnode *pMnode, SStreamObj *pStream, const char *ast } mDebug("trans:%d, used to create stream:%s", pTrans->id, pStream->name); - SSdbRaw *pRedoRaw = mndStreamActionEncode(pStream); - if (pRedoRaw == NULL || mndTransAppendRedolog(pTrans, pRedoRaw) != 0) { - mError("trans:%d, failed to append redo log since %s", pTrans->id, terrstr()); + SSdbRaw *pCommitRaw = mndStreamActionEncode(pStream); + if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) { + mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); mndTransDrop(pTrans); return -1; } - sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY); + sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); return 0; } @@ -456,7 +456,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { goto CREATE_STREAM_OVER; } - pDb = mndAcquireDbByStream(pMnode, createStreamReq.name); + pDb = mndAcquireDb(pMnode, createStreamReq.sourceDB); if (pDb == NULL) { terrno = TSDB_CODE_MND_DB_NOT_SELECTED; goto CREATE_STREAM_OVER; } diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index c82472eec05c0f5104dd9f3a9697078e27799948..e58630ddbfdcc8cb01f26ce3aa478e68fac76fa6 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -42,6 +42,7 @@ static int32_t mndSubActionDelete(SSdb *pSdb, SMqSubscribeObj *); static int32_t mndSubActionUpdate(SSdb *pSdb, SMqSubscribeObj *pOldSub, SMqSubscribeObj *pNewSub); static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg); +static int32_t mndProcessDropCgroupReq(SRpcMsg *pMsg); static int32_t mndProcessSubscribeInternalRsp(SRpcMsg *pMsg); static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows); @@ -75,6 +76,7 @@ int32_t mndInitSubscribe(SMnode *pMnode) { mndSetMsgHandle(pMnode, TDMT_VND_MQ_VG_CHANGE_RSP, mndProcessSubscribeInternalRsp); mndSetMsgHandle(pMnode, TDMT_VND_MQ_VG_DELETE_RSP, mndProcessSubscribeInternalRsp); mndSetMsgHandle(pMnode, TDMT_MND_MQ_DO_REBALANCE, mndProcessRebalanceReq); + mndSetMsgHandle(pMnode, TDMT_MND_MQ_DROP_CGROUP, mndProcessDropCgroupReq); mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_SUBSCRIPTIONS, mndRetrieveSubscribe); mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_TOPICS, mndCancelGetNextSubscribe); @@ -417,7 +419,7 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu // 2. redo log: subscribe and vg assignment // subscribe - if (mndSetSubRedoLogs(pMnode, pTrans, pOutput->pSub) != 0) { + if (mndSetSubCommitLogs(pMnode, pTrans, pOutput->pSub) != 0) { goto REB_FAIL; } @@ -479,14 +481,18 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu SMqTopicObj topicObj = {0}; memcpy(&topicObj, pTopic, sizeof(SMqTopicObj)); topicObj.refConsumerCnt = pTopic->refConsumerCnt - consumerNum; - if (mndSetTopicRedoLogs(pMnode, pTrans, &topicObj) != 0) goto REB_FAIL; + // TODO is that correct? + pTopic->refConsumerCnt = topicObj.refConsumerCnt; + mInfo("subscribe topic %s unref %d consumer cgroup %s, refcnt %d", pTopic->name, consumerNum, cgroup, + topicObj.refConsumerCnt); + if (mndSetTopicCommitLogs(pMnode, pTrans, &topicObj) != 0) goto REB_FAIL; } } // 4. TODO commit log: modification log // 5. set cb - mndTransSetCb(pTrans, MQ_REB_TRANS_START_FUNC, MQ_REB_TRANS_STOP_FUNC, NULL, 0); + mndTransSetCb(pTrans, TRANS_START_FUNC_MQ_REB, TRANS_STOP_FUNC_TEST_MQ_REB, NULL, 0); // 6.
execution if (mndTransPrepare(pMnode, pTrans) != 0) goto REB_FAIL; @@ -577,6 +583,57 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) { return 0; } +static int32_t mndProcessDropCgroupReq(SRpcMsg *pReq) { + SMnode *pMnode = pReq->info.node; + /*SSdb *pSdb = pMnode->pSdb;*/ + SMDropCgroupReq dropReq = {0}; + + if (tDeserializeSMDropCgroupReq(pReq->pCont, pReq->contLen, &dropReq) != 0) { + terrno = TSDB_CODE_INVALID_MSG; + return -1; + } + + SMqSubscribeObj *pSub = mndAcquireSubscribe(pMnode, dropReq.cgroup, dropReq.topic); + if (pSub == NULL) { + if (dropReq.igNotExists) { + mDebug("cgroup:%s on topic:%s, not exist, ignore not exist is set", dropReq.cgroup, dropReq.topic); + return 0; + } else { + terrno = TSDB_CODE_MND_SUBSCRIBE_NOT_EXIST; + mError("topic:%s, cgroup:%s, failed to drop since %s", dropReq.topic, dropReq.cgroup, terrstr()); + return -1; + } + } + + if (taosHashGetSize(pSub->consumerHash) != 0) { // the cgroup can only be dropped when no consumer remains + terrno = TSDB_CODE_MND_CGROUP_USED; + mError("cgroup:%s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr()); + return -1; + } + + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_DROP_CGROUP, pReq); + if (pTrans == NULL) { + mError("cgroup:%s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr()); + return -1; + } + + mDebug("trans:%d, used to drop cgroup:%s on topic %s", pTrans->id, dropReq.cgroup, dropReq.topic); + + if (mndDropOffsetBySubKey(pMnode, pTrans, pSub->key) < 0) { + ASSERT(0); + return -1; + } + + if (mndSetDropSubCommitLogs(pMnode, pTrans, pSub) < 0) { + mError("cgroup:%s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr()); + return -1; + } + + mndReleaseSubscribe(pMnode, pSub); + + return TSDB_CODE_ACTION_IN_PROGRESS; +} + void mndCleanupSubscribe(SMnode *pMnode) {} static SSdbRaw *mndSubActionEncode(SMqSubscribeObj *pSub) { @@ -731,7 +788,7 @@ static int32_t mndSetDropSubRedoLogs(SMnode *pMnode, STrans *pTrans, SMqSubscrib return 0; } -static int32_t mndSetDropSubCommitLogs(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub) { +int32_t mndSetDropSubCommitLogs(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub) { SSdbRaw *pCommitRaw = mndSubActionEncode(pSub); if (pCommitRaw == NULL) return -1; if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) return -1; @@ -882,7 +939,7 @@ static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock } // do not show for cleared subscription -#if 0 +#if 1 int32_t sz = taosArrayGetSize(pSub->unassignedVgs); for (int32_t i = 0; i < sz; i++) { SMqVgEp *pVgEp = taosArrayGetP(pSub->unassignedVgs, i); diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c index 3dbe3241a78f617a99148f8c571189eea41e17b5..8b602d796c47f29efa8dcfb059d2aff5b3b9de40 100644 --- a/source/dnode/mnode/impl/src/mndSync.c +++ b/source/dnode/mnode/impl/src/mndSync.c @@ -17,178 +17,251 @@ #include "mndSync.h" #include "mndTrans.h" -static int32_t mndInitWal(SMnode *pMnode) { - SSyncMgmt *pMgmt = &pMnode->syncMgmt; - - char path[PATH_MAX] = {0}; - snprintf(path, sizeof(path), "%s%swal", pMnode->path, TD_DIRSEP); - SWalCfg cfg = { - .vgId = 1, - .fsyncPeriod = 0, - .rollPeriod = -1, - .segSize = -1, - .retentionPeriod = -1, - .retentionSize = -1, - .level = TAOS_WAL_FSYNC, - }; - pMgmt->pWal = walOpen(path, &cfg); - if (pMgmt->pWal == NULL) return -1; +int32_t mndSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) { + SMsgHead *pHead = pMsg->pCont; + pHead->contLen = htonl(pHead->contLen); + pHead->vgId 
= htonl(pHead->vgId); - return 0; + return tmsgPutToQueue(msgcb, SYNC_QUEUE, pMsg); } -static void mndCloseWal(SMnode *pMnode) { - SSyncMgmt *pMgmt = &pMnode->syncMgmt; - if (pMgmt->pWal != NULL) { - walClose(pMgmt->pWal); - pMgmt->pWal = NULL; - } -} +int32_t mndSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) { return tmsgSendReq(pEpSet, pMsg); } -static int32_t mndRestoreWal(SMnode *pMnode) { - SWal *pWal = pMnode->syncMgmt.pWal; - SSdb *pSdb = pMnode->pSdb; - int64_t lastSdbVer = sdbUpdateVer(pSdb, 0); - int32_t code = -1; +void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) { + SMnode *pMnode = pFsm->data; + SSyncMgmt *pMgmt = &pMnode->syncMgmt; + SSdbRaw *pRaw = pMsg->pCont; - SWalReadHandle *pHandle = walOpenReadHandle(pWal); - if (pHandle == NULL) return -1; + int32_t transId = sdbGetIdFromRaw(pMnode->pSdb, pRaw); + pMgmt->errCode = cbMeta.code; + mTrace("trans:%d, is proposed, savedTransId:%d code:0x%x, ver:%" PRId64 " term:%" PRId64 " role:%s raw:%p", transId, + pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term, syncStr(cbMeta.state), pRaw); - int64_t first = walGetFirstVer(pWal); - int64_t last = walGetLastVer(pWal); - mDebug("start to restore wal, sdbver:%" PRId64 ", first:%" PRId64 " last:%" PRId64, lastSdbVer, first, last); + if (pMgmt->errCode == 0) { + sdbWriteWithoutFree(pMnode->pSdb, pRaw); + sdbSetApplyIndex(pMnode->pSdb, cbMeta.index); + sdbSetApplyTerm(pMnode->pSdb, cbMeta.term); + } - first = TMAX(lastSdbVer + 1, first); - for (int64_t ver = first; ver >= 0 && ver <= last; ++ver) { - if (walReadWithHandle(pHandle, ver) < 0) { - mError("ver:%" PRId64 ", failed to read from wal since %s", ver, terrstr()); - goto _OVER; + if (pMgmt->transId == transId) { + if (pMgmt->errCode != 0) { + mError("trans:%d, failed to propose since %s", transId, tstrerror(pMgmt->errCode)); } - - SWalHead *pHead = pHandle->pHead; - int64_t sdbVer = sdbUpdateVer(pSdb, 0); - if (sdbVer + 1 != ver) { - terrno = TSDB_CODE_SDB_INVALID_WAl_VER; - mError("ver:%" PRId64 ", failed to write to sdb, since inconsistent with sdbver:%" PRId64, ver, sdbVer); - goto _OVER; + tsem_post(&pMgmt->syncSem); + } else { + if (cbMeta.index - sdbGetApplyIndex(pMnode->pSdb) > 100) { + sdbWriteFile(pMnode->pSdb); } + } +} - mTrace("ver:%" PRId64 ", will be restored, content:%p", ver, pHead->head.body); - if (sdbWriteWithoutFree(pSdb, (void *)pHead->head.body) < 0) { - mError("ver:%" PRId64 ", failed to write to sdb since %s", ver, terrstr()); - goto _OVER; - } +int32_t mndSyncGetSnapshot(struct SSyncFSM *pFsm, SSnapshot *pSnapshot) { + SMnode *pMnode = pFsm->data; + pSnapshot->lastApplyIndex = sdbGetApplyIndex(pMnode->pSdb); + pSnapshot->lastApplyTerm = sdbGetApplyTerm(pMnode->pSdb); + return 0; +} - sdbUpdateVer(pSdb, 1); - mDebug("ver:%" PRId64 ", is restored", ver); +void mndRestoreFinish(struct SSyncFSM *pFsm) { + SMnode *pMnode = pFsm->data; + if (!pMnode->deploy) { + mInfo("mnode sync restore finished"); + mndTransPullup(pMnode); + mndSetRestore(pMnode, true); + } else { + mInfo("mnode sync restore finished, and will set ready after first deploy"); } +} - int64_t sdbVer = sdbUpdateVer(pSdb, 0); - mDebug("restore wal finished, sdbver:%" PRId64, sdbVer); +void mndReConfig(struct SSyncFSM *pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) { + SMnode *pMnode = pFsm->data; + SSyncMgmt *pMgmt = &pMnode->syncMgmt; - mndTransPullup(pMnode); - sdbVer = sdbUpdateVer(pSdb, 0); - mDebug("pullup trans finished, sdbver:%" PRId64, sdbVer); + pMgmt->errCode = cbMeta.code; + mInfo("trans:-1, sync 
reconfig is proposed, savedTransId:%d code:0x%x, index:%" PRId64 " term:%" PRId64, + pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term); - if (sdbVer != lastSdbVer) { - mInfo("sdb restored from %" PRId64 " to %" PRId64 ", write file", lastSdbVer, sdbVer); - if (sdbWriteFile(pSdb) != 0) { - goto _OVER; + if (pMgmt->transId == -1) { + if (pMgmt->errCode != 0) { + mError("trans:-1, failed to propose sync reconfig since %s", tstrerror(pMgmt->errCode)); } + tsem_post(&pMgmt->syncSem); + } +} - if (walCommit(pWal, sdbVer) != 0) { - goto _OVER; - } +int32_t mndSnapshotStartRead(struct SSyncFSM *pFsm, void **ppReader) { + mInfo("start to read snapshot from sdb"); + SMnode *pMnode = pFsm->data; + return sdbStartRead(pMnode->pSdb, (SSdbIter **)ppReader); +} - if (walBeginSnapshot(pWal, sdbVer) < 0) { - goto _OVER; - } +int32_t mndSnapshotStopRead(struct SSyncFSM *pFsm, void *pReader) { + mInfo("stop to read snapshot from sdb"); + SMnode *pMnode = pFsm->data; + return sdbStopRead(pMnode->pSdb, pReader); +} - if (walEndSnapshot(pWal) < 0) { - goto _OVER; - } - } +int32_t mndSnapshotDoRead(struct SSyncFSM *pFsm, void *pReader, void **ppBuf, int32_t *len) { + SMnode *pMnode = pFsm->data; + return sdbDoRead(pMnode->pSdb, pReader, ppBuf, len); +} - code = 0; +int32_t mndSnapshotStartWrite(struct SSyncFSM *pFsm, void **ppWriter) { + mInfo("start to apply snapshot to sdb"); + SMnode *pMnode = pFsm->data; + return sdbStartWrite(pMnode->pSdb, (SSdbIter **)ppWriter); +} + +int32_t mndSnapshotStopWrite(struct SSyncFSM *pFsm, void *pWriter, bool isApply) { + mInfo("stop to apply snapshot to sdb, apply:%d", isApply); + SMnode *pMnode = pFsm->data; + return sdbStopWrite(pMnode->pSdb, pWriter, isApply); +} + +int32_t mndSnapshotDoWrite(struct SSyncFSM *pFsm, void *pWriter, void *pBuf, int32_t len) { + SMnode *pMnode = pFsm->data; + return sdbDoWrite(pMnode->pSdb, pWriter, pBuf, len); +} -_OVER: - walCloseReadHandle(pHandle); - return code; +SSyncFSM *mndSyncMakeFsm(SMnode *pMnode) { + SSyncFSM *pFsm = taosMemoryCalloc(1, sizeof(SSyncFSM)); + pFsm->data = pMnode; + pFsm->FpCommitCb = mndSyncCommitMsg; + pFsm->FpPreCommitCb = NULL; + pFsm->FpRollBackCb = NULL; + pFsm->FpRestoreFinishCb = mndRestoreFinish; + pFsm->FpReConfigCb = mndReConfig; + pFsm->FpGetSnapshot = mndSyncGetSnapshot; + pFsm->FpSnapshotStartRead = mndSnapshotStartRead; + pFsm->FpSnapshotStopRead = mndSnapshotStopRead; + pFsm->FpSnapshotDoRead = mndSnapshotDoRead; + pFsm->FpSnapshotStartWrite = mndSnapshotStartWrite; + pFsm->FpSnapshotStopWrite = mndSnapshotStopWrite; + pFsm->FpSnapshotDoWrite = mndSnapshotDoWrite; + return pFsm; } int32_t mndInitSync(SMnode *pMnode) { SSyncMgmt *pMgmt = &pMnode->syncMgmt; - tsem_init(&pMgmt->syncSem, 0, 0); - if (mndInitWal(pMnode) < 0) { + char path[PATH_MAX + 20] = {0}; + snprintf(path, sizeof(path), "%s%swal", pMnode->path, TD_DIRSEP); + SWalCfg cfg = { + .vgId = 1, + .fsyncPeriod = 0, + .rollPeriod = -1, + .segSize = -1, + .retentionPeriod = -1, + .retentionSize = -1, + .level = TAOS_WAL_FSYNC, + }; + + pMgmt->pWal = walOpen(path, &cfg); + if (pMgmt->pWal == NULL) { mError("failed to open wal since %s", terrstr()); return -1; } - if (mndRestoreWal(pMnode) < 0) { - mError("failed to restore wal since %s", terrstr()); - return -1; + SSyncInfo syncInfo = {.vgId = 1, .FpSendMsg = mndSyncSendMsg, .FpEqMsg = mndSyncEqMsg}; + snprintf(syncInfo.path, sizeof(syncInfo.path), "%s%ssync", pMnode->path, TD_DIRSEP); + syncInfo.pWal = pMgmt->pWal; + syncInfo.pFsm = mndSyncMakeFsm(pMnode); + syncInfo.isStandBy = 
pMgmt->standby; + + SSyncCfg *pCfg = &syncInfo.syncCfg; + pCfg->replicaNum = pMnode->replica; + pCfg->myIndex = pMnode->selfIndex; + mInfo("start to open mnode sync, replica:%d myindex:%d standby:%d", pCfg->replicaNum, pCfg->myIndex, pMgmt->standby); + for (int32_t i = 0; i < pMnode->replica; ++i) { + SNodeInfo *pNode = &pCfg->nodeInfo[i]; + tstrncpy(pNode->nodeFqdn, pMnode->replicas[i].fqdn, sizeof(pNode->nodeFqdn)); + pNode->nodePort = pMnode->replicas[i].port; + mInfo("index:%d, fqdn:%s port:%d", i, pNode->nodeFqdn, pNode->nodePort); } - if (pMnode->selfId == 1) { - pMgmt->state = TAOS_SYNC_STATE_LEADER; + tsem_init(&pMgmt->syncSem, 0, 0); + pMgmt->sync = syncOpen(&syncInfo); + if (pMgmt->sync <= 0) { + mError("failed to open sync since %s", terrstr()); + return -1; } - pMgmt->pSyncNode = NULL; + + mDebug("mnode sync is opened, id:%" PRId64, pMgmt->sync); return 0; } void mndCleanupSync(SMnode *pMnode) { SSyncMgmt *pMgmt = &pMnode->syncMgmt; + syncStop(pMgmt->sync); + mDebug("sync:%" PRId64 " is stopped", pMgmt->sync); + tsem_destroy(&pMgmt->syncSem); - mndCloseWal(pMnode); + if (pMgmt->pWal != NULL) { + walClose(pMgmt->pWal); + } + + memset(pMgmt, 0, sizeof(SSyncMgmt)); } -static int32_t mndSyncApplyCb(struct SSyncFSM *fsm, SyncIndex index, const SSyncBuffer *buf, void *pData) { - SMnode *pMnode = pData; +int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw, int32_t transId) { SSyncMgmt *pMgmt = &pMnode->syncMgmt; + SRpcMsg rsp = {.code = TDMT_MND_APPLY_MSG, .contLen = sdbGetRawTotalSize(pRaw)}; + rsp.pCont = rpcMallocCont(rsp.contLen); + if (rsp.pCont == NULL) return -1; + memcpy(rsp.pCont, pRaw, rsp.contLen); pMgmt->errCode = 0; - tsem_post(&pMgmt->syncSem); + pMgmt->transId = transId; + mTrace("trans:%d, will be proposed", pMgmt->transId); + + const bool isWeak = false; + int32_t code = syncPropose(pMgmt->sync, &rsp, isWeak); + if (code == 0) { + tsem_wait(&pMgmt->syncSem); + } else if (code == TAOS_SYNC_PROPOSE_NOT_LEADER) { + terrno = TSDB_CODE_APP_NOT_READY; + } else if (code == TAOS_SYNC_PROPOSE_OTHER_ERROR) { + terrno = TSDB_CODE_SYN_INTERNAL_ERROR; + } else { + terrno = TSDB_CODE_APP_ERROR; + } - return 0; + rpcFreeCont(rsp.pCont); + if (code != 0) { + mError("trans:%d, failed to propose, code:0x%x", pMgmt->transId, code); + return code; + } + + return pMgmt->errCode; } -int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw) { - SWal *pWal = pMnode->syncMgmt.pWal; - SSdb *pSdb = pMnode->pSdb; +void mndSyncStart(SMnode *pMnode) { + SSyncMgmt *pMgmt = &pMnode->syncMgmt; + syncSetMsgCb(pMgmt->sync, &pMnode->msgCb); - int64_t ver = sdbUpdateVer(pSdb, 1); - if (walWrite(pWal, ver, 1, pRaw, sdbGetRawTotalSize(pRaw)) < 0) { - sdbUpdateVer(pSdb, -1); - mError("ver:%" PRId64 ", failed to write raw:%p to wal since %s", ver, pRaw, terrstr()); - return -1; + if (pMgmt->standby) { + syncStartStandBy(pMgmt->sync); + } else { + syncStart(pMgmt->sync); } + mDebug("sync:%" PRId64 " is started, standby:%d", pMgmt->sync, pMgmt->standby); +} - mTrace("ver:%" PRId64 ", write to wal, raw:%p", ver, pRaw); - walCommit(pWal, ver); - walFsync(pWal, true); - -#if 1 - return 0; -#else - if (pMnode->replica == 1) return 0; +void mndSyncStop(SMnode *pMnode) {} +bool mndIsMaster(SMnode *pMnode) { SSyncMgmt *pMgmt = &pMnode->syncMgmt; - pMgmt->errCode = 0; - - SSyncBuffer buf = {.data = pRaw, .len = sdbGetRawTotalSize(pRaw)}; - bool isWeak = false; - int32_t code = syncPropose(pMgmt->pSyncNode, &buf, pMnode, isWeak); - - if (code != 0) return code; + ESyncState state = syncGetMyRole(pMgmt->sync); + if (state 
!= TAOS_SYNC_STATE_LEADER) { + terrno = TSDB_CODE_SYN_NOT_LEADER; + return false; + } - tsem_wait(&pMgmt->syncSem); - return pMgmt->errCode; -#endif -} + if (!pMnode->restored) { + terrno = TSDB_CODE_APP_NOT_READY; + return false; + } -bool mndIsMaster(SMnode *pMnode) { - SSyncMgmt *pMgmt = &pMnode->syncMgmt; - return pMgmt->state == TAOS_SYNC_STATE_LEADER; + return true; } diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index b9b01a4391eeda3bcf277d4b85f315aa2accf65a..2048c798475062055520fe25e0249f411615b81f 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -217,7 +217,7 @@ SSdbRow *mndTopicActionDecode(SSdbRaw *pRaw) { } } else { pTopic->schema.nCols = 0; - pTopic->schema.sver = 0; + pTopic->schema.version = 0; pTopic->schema.pSchema = NULL; } @@ -386,14 +386,14 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq * } mDebug("trans:%d, used to create topic:%s", pTrans->id, pCreate->name); - SSdbRaw *pRedoRaw = mndTopicActionEncode(&topicObj); - if (pRedoRaw == NULL || mndTransAppendRedolog(pTrans, pRedoRaw) != 0) { - mError("trans:%d, failed to append redo log since %s", pTrans->id, terrstr()); + SSdbRaw *pCommitRaw = mndTopicActionEncode(&topicObj); + if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) { + mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); taosMemoryFreeClear(topicObj.physicalPlan); mndTransDrop(pTrans); return -1; } - sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY); + sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); if (mndTransPrepare(pMnode, pTrans) != 0) { mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); @@ -473,13 +473,13 @@ CREATE_TOPIC_OVER: } static int32_t mndDropTopic(SMnode *pMnode, STrans *pTrans, SRpcMsg *pReq, SMqTopicObj *pTopic) { - SSdbRaw *pRedoRaw = mndTopicActionEncode(pTopic); - if (pRedoRaw == NULL || mndTransAppendRedolog(pTrans, pRedoRaw) != 0) { - mError("trans:%d, failed to append redo log since %s", pTrans->id, terrstr()); + SSdbRaw *pCommitRaw = mndTopicActionEncode(pTopic); + if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) { + mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); mndTransDrop(pTrans); return -1; } - sdbSetRawStatus(pRedoRaw, SDB_STATUS_DROPPED); + sdbSetRawStatus(pCommitRaw, SDB_STATUS_DROPPED); if (mndTransPrepare(pMnode, pTrans) != 0) { mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); @@ -627,11 +627,11 @@ static int32_t mndRetrieveTopic(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl return numOfRows; } -int32_t mndSetTopicRedoLogs(SMnode *pMnode, STrans *pTrans, SMqTopicObj *pTopic) { - SSdbRaw *pRedoRaw = mndTopicActionEncode(pTopic); - if (pRedoRaw == NULL) return -1; - if (mndTransAppendCommitlog(pTrans, pRedoRaw) != 0) return -1; - if (sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY) != 0) return -1; +int32_t mndSetTopicCommitLogs(SMnode *pMnode, STrans *pTrans, SMqTopicObj *pTopic) { + SSdbRaw *pCommitRaw = mndTopicActionEncode(pTopic); + if (pCommitRaw == NULL) return -1; + if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) return -1; + if (sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY) != 0) return -1; return 0; } diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index 5d205197d1734951451a54dac9758979ab4a8b04..9d392c64fb16199ae90568ed498bd4d49ec9c7a2 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ 
b/source/dnode/mnode/impl/src/mndTrans.c @@ -140,6 +140,7 @@ static SSdbRaw *mndTransActionEncode(STrans *pTrans) { SDB_SET_INT16(pRaw, dataPos, stage, _OVER) SDB_SET_INT16(pRaw, dataPos, pTrans->policy, _OVER) SDB_SET_INT16(pRaw, dataPos, pTrans->type, _OVER) + SDB_SET_INT16(pRaw, dataPos, pTrans->parallel, _OVER) SDB_SET_INT64(pRaw, dataPos, pTrans->createdTime, _OVER) SDB_SET_INT64(pRaw, dataPos, pTrans->dbUid, _OVER) SDB_SET_BINARY(pRaw, dataPos, pTrans->dbname, TSDB_DB_FNAME_LEN, _OVER) @@ -245,12 +246,15 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) { int16_t stage = 0; int16_t policy = 0; int16_t type = 0; + int16_t parallel = 0; SDB_GET_INT16(pRaw, dataPos, &stage, _OVER) SDB_GET_INT16(pRaw, dataPos, &policy, _OVER) SDB_GET_INT16(pRaw, dataPos, &type, _OVER) + SDB_GET_INT16(pRaw, dataPos, &parallel, _OVER) pTrans->stage = stage; pTrans->policy = policy; pTrans->type = type; + pTrans->parallel = parallel; SDB_GET_INT64(pRaw, dataPos, &pTrans->createdTime, _OVER) SDB_GET_INT64(pRaw, dataPos, &pTrans->dbUid, _OVER) SDB_GET_BINARY(pRaw, dataPos, pTrans->dbname, TSDB_DB_FNAME_LEN, _OVER) @@ -460,15 +464,15 @@ static void mndTransTestStopFunc(SMnode *pMnode, void *param, int32_t paramLen) mInfo("test trans stop, param:%s, len:%d", (char *)param, paramLen); } -static TransCbFp mndTransGetCbFp(ETrnFuncType ftype) { +static TransCbFp mndTransGetCbFp(ETrnFunc ftype) { switch (ftype) { - case TEST_TRANS_START_FUNC: + case TRANS_START_FUNC_TEST: return mndTransTestStartFunc; - case TEST_TRANS_STOP_FUNC: + case TRANS_STOP_FUNC_TEST: return mndTransTestStopFunc; - case MQ_REB_TRANS_START_FUNC: + case TRANS_START_FUNC_MQ_REB: return mndRebCntInc; - case MQ_REB_TRANS_STOP_FUNC: + case TRANS_STOP_FUNC_TEST_MQ_REB: return mndRebCntDec; default: return NULL; @@ -563,7 +567,7 @@ STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnType type, const S pTrans->policy = policy; pTrans->type = type; pTrans->createdTime = taosGetTimestampMs(); - pTrans->rpcInfo = pReq->info; + if (pReq != NULL) pTrans->rpcInfo = pReq->info; pTrans->redoLogs = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(void *)); pTrans->undoLogs = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(void *)); pTrans->commitLogs = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(void *)); @@ -653,7 +657,7 @@ void mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen) { pTrans->rpcRspLen = contLen; } -void mndTransSetCb(STrans *pTrans, ETrnFuncType startFunc, ETrnFuncType stopFunc, void *param, int32_t paramLen) { +void mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void *param, int32_t paramLen) { pTrans->startFunc = startFunc; pTrans->stopFunc = stopFunc; pTrans->param = param; @@ -665,6 +669,8 @@ void mndTransSetDbInfo(STrans *pTrans, SDbObj *pDb) { memcpy(pTrans->dbname, pDb->name, TSDB_DB_FNAME_LEN); } +void mndTransSetExecOneByOne(STrans *pTrans) { pTrans->parallel = TRN_EXEC_ONE_BY_ONE; } + static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) { SSdbRaw *pRaw = mndTransActionEncode(pTrans); if (pRaw == NULL) { @@ -674,21 +680,15 @@ static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) { sdbSetRawStatus(pRaw, SDB_STATUS_READY); mDebug("trans:%d, sync to other nodes", pTrans->id); - int32_t code = mndSyncPropose(pMnode, pRaw); + int32_t code = mndSyncPropose(pMnode, pRaw, pTrans->id); if (code != 0) { mError("trans:%d, failed to sync since %s", pTrans->id, terrstr()); sdbFreeRaw(pRaw); return -1; } + sdbFreeRaw(pRaw); mDebug("trans:%d, sync finished", pTrans->id); - - code = sdbWrite(pMnode->pSdb, pRaw); - if 
(code != 0) { - mError("trans:%d, failed to write sdb since %s", pTrans->id, terrstr()); - return -1; - } - return 0; } @@ -768,6 +768,12 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) { return -1; } + if (taosArrayGetSize(pTrans->commitLogs) <= 0) { + terrno = TSDB_CODE_MND_TRANS_CLOG_IS_NULL; + mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); + return -1; + } + mDebug("trans:%d, prepare transaction", pTrans->id); if (mndTransSync(pMnode, pTrans) != 0) { mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); @@ -829,7 +835,7 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) { sendRsp = true; } } else { - if (pTrans->stage == TRN_STAGE_REDO_ACTION && pTrans->failedTimes > 0) { + if (pTrans->stage == TRN_STAGE_REDO_ACTION && pTrans->failedTimes > 6) { if (code == 0) code = TSDB_CODE_MND_TRANS_UNKNOW_ERROR; sendRsp = true; } @@ -970,7 +976,18 @@ static int32_t mndTransSendActionMsg(SMnode *pMnode, STrans *pTrans, SArray *pAr for (int32_t action = 0; action < numOfActions; ++action) { STransAction *pAction = taosArrayGet(pArray, action); if (pAction == NULL) continue; - if (pAction->msgSent) continue; + + if (pAction->msgSent) { + if (pAction->msgReceived) { + continue; + } else { + if (pTrans->parallel == TRN_EXEC_ONE_BY_ONE) { + break; + } else { + continue; + } + } + } int64_t signature = pTrans->id; signature = (signature << 32); @@ -990,6 +1007,9 @@ static int32_t mndTransSendActionMsg(SMnode *pMnode, STrans *pTrans, SArray *pAr pAction->msgSent = 1; pAction->msgReceived = 0; pAction->errCode = 0; + if (pTrans->parallel == TRN_EXEC_ONE_BY_ONE) { + break; + } } else { pAction->msgSent = 0; pAction->msgReceived = 0; @@ -1080,6 +1100,8 @@ static bool mndTransPerformRedoLogStage(SMnode *pMnode, STrans *pTrans) { } static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) { + if (!pMnode->deploy && !mndIsMaster(pMnode)) return false; + bool continueExec = true; int32_t code = mndTransExecuteRedoActions(pMnode, pTrans); @@ -1169,6 +1191,8 @@ static bool mndTransPerformUndoLogStage(SMnode *pMnode, STrans *pTrans) { } static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans) { + if (!pMnode->deploy && !mndIsMaster(pMnode)) return false; + bool continueExec = true; int32_t code = mndTransExecuteUndoActions(pMnode, pTrans); @@ -1350,19 +1374,35 @@ _OVER: return code; } +static int32_t mndCompareTransId(int32_t *pTransId1, int32_t *pTransId2) { return *pTransId1 > *pTransId2 ? 1 : (*pTransId1 < *pTransId2 ? -1 : 0); } // ascending by trans id so transactions replay in creation order + void mndTransPullup(SMnode *pMnode) { - STrans *pTrans = NULL; - void *pIter = NULL; + SSdb *pSdb = pMnode->pSdb; + SArray *pArray = taosArrayInit(sdbGetSize(pSdb, SDB_TRANS), sizeof(int32_t)); + if (pArray == NULL) return; + void *pIter = NULL; while (1) { + STrans *pTrans = NULL; pIter = sdbFetch(pMnode->pSdb, SDB_TRANS, pIter, (void **)&pTrans); if (pIter == NULL) break; + taosArrayPush(pArray, &pTrans->id); + sdbRelease(pSdb, pTrans); + } - mndTransExecute(pMnode, pTrans); - sdbRelease(pMnode->pSdb, pTrans); + taosArraySort(pArray, (__compar_fn_t)mndCompareTransId); + + for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) { + int32_t *pTransId = taosArrayGet(pArray, i); + STrans *pTrans = mndAcquireTrans(pMnode, *pTransId); + if (pTrans != NULL) { + mndTransExecute(pMnode, pTrans); + } + mndReleaseTrans(pMnode, pTrans); } sdbWriteFile(pMnode->pSdb); + taosArrayDestroy(pArray); } static int32_t mndRetrieveTrans(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) { diff --git a/source/dnode/mnode/impl/src/mndUser.c b/source/dnode/mnode/impl/src/mndUser.c index 88e646e76548bc0db5ed2af9e6e26ceb489c5cd0..cc6364c4571b7b56b096d282c4f8f29a7b624dca 100644 --- a/source/dnode/mnode/impl/src/mndUser.c +++ b/source/dnode/mnode/impl/src/mndUser.c @@ -78,7 +78,33 @@ static int32_t mndCreateDefaultUser(SMnode *pMnode, char *acct, char *user, char sdbSetRawStatus(pRaw, SDB_STATUS_READY); mDebug("user:%s, will be created while deploy sdb, raw:%p", userObj.user, pRaw); + +#if 0 return sdbWrite(pMnode->pSdb, pRaw); +#else + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_USER, NULL); + if (pTrans == NULL) { + mError("user:%s, failed to create since %s", userObj.user, terrstr()); + return -1; + } + mDebug("trans:%d, used to create user:%s", pTrans->id, userObj.user); + + if (mndTransAppendCommitlog(pTrans, pRaw) != 0) { + mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); + mndTransDrop(pTrans); + return -1; + } + sdbSetRawStatus(pRaw, SDB_STATUS_READY); + + if (mndTransPrepare(pMnode, pTrans) != 0) { + mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); + mndTransDrop(pTrans); + return -1; + } + + mndTransDrop(pTrans); + return 0; +#endif } static int32_t mndCreateDefaultUsers(SMnode *pMnode) { @@ -272,13 +298,13 @@ static int32_t mndCreateUser(SMnode *pMnode, char *acct, SCreateUserReq *pCreate } mDebug("trans:%d, used to create user:%s", pTrans->id, pCreate->user); - SSdbRaw *pRedoRaw = mndUserActionEncode(&userObj); - if (pRedoRaw == NULL || mndTransAppendRedolog(pTrans, pRedoRaw) != 0) { - mError("trans:%d, failed to append redo log since %s", pTrans->id, terrstr()); + SSdbRaw *pCommitRaw = mndUserActionEncode(&userObj); + if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) { + mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); mndTransDrop(pTrans); return -1; } - sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY); + sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); if (mndTransPrepare(pMnode, pTrans) != 0) { mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); @@ -352,13 +378,13 @@ static int32_t mndAlterUser(SMnode *pMnode, SUserObj *pOld, SUserObj *pNew, SRpc } mDebug("trans:%d, used to alter user:%s", pTrans->id, pOld->user); - SSdbRaw *pRedoRaw = mndUserActionEncode(pNew); - if (pRedoRaw == NULL || mndTransAppendRedolog(pTrans, pRedoRaw) != 0) { - mError("trans:%d, failed to append redo log since %s", pTrans->id, terrstr()); + SSdbRaw 
*pCommitRaw = mndUserActionEncode(pNew); + if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) { + mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); mndTransDrop(pTrans); return -1; } - sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY); + sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); if (mndTransPrepare(pMnode, pTrans) != 0) { mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); @@ -559,13 +585,13 @@ static int32_t mndDropUser(SMnode *pMnode, SRpcMsg *pReq, SUserObj *pUser) { } mDebug("trans:%d, used to drop user:%s", pTrans->id, pUser->user); - SSdbRaw *pRedoRaw = mndUserActionEncode(pUser); - if (pRedoRaw == NULL || mndTransAppendRedolog(pTrans, pRedoRaw) != 0) { - mError("trans:%d, failed to append redo log since %s", pTrans->id, terrstr()); + SSdbRaw *pCommitRaw = mndUserActionEncode(pUser); + if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) { + mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); mndTransDrop(pTrans); return -1; } - sdbSetRawStatus(pRedoRaw, SDB_STATUS_DROPPED); + sdbSetRawStatus(pCommitRaw, SDB_STATUS_DROPPED); if (mndTransPrepare(pMnode, pTrans) != 0) { mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index 62021c6a7edc467bd7cd62fba9ef9eddbef1193b..e05b38a7c0345293eb53caeab2eb680f6d113651 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -80,6 +80,7 @@ SSdbRaw *mndVgroupActionEncode(SVgObj *pVgroup) { SDB_SET_INT32(pRaw, dataPos, pVgroup->hashEnd, _OVER) SDB_SET_BINARY(pRaw, dataPos, pVgroup->dbName, TSDB_DB_FNAME_LEN, _OVER) SDB_SET_INT64(pRaw, dataPos, pVgroup->dbUid, _OVER) + SDB_SET_INT8(pRaw, dataPos, pVgroup->isTsma, _OVER) SDB_SET_INT8(pRaw, dataPos, pVgroup->replica, _OVER) for (int8_t i = 0; i < pVgroup->replica; ++i) { SVnodeGid *pVgid = &pVgroup->vnodeGid[i]; @@ -127,6 +128,7 @@ SSdbRow *mndVgroupActionDecode(SSdbRaw *pRaw) { SDB_GET_INT32(pRaw, dataPos, &pVgroup->hashEnd, _OVER) SDB_GET_BINARY(pRaw, dataPos, pVgroup->dbName, TSDB_DB_FNAME_LEN, _OVER) SDB_GET_INT64(pRaw, dataPos, &pVgroup->dbUid, _OVER) + SDB_GET_INT8(pRaw, dataPos, &pVgroup->isTsma, _OVER) SDB_GET_INT8(pRaw, dataPos, &pVgroup->replica, _OVER) for (int8_t i = 0; i < pVgroup->replica; ++i) { SVnodeGid *pVgid = &pVgroup->vnodeGid[i]; @@ -167,6 +169,7 @@ static int32_t mndVgroupActionUpdate(SSdb *pSdb, SVgObj *pOld, SVgObj *pNew) { pOld->hashBegin = pNew->hashBegin; pOld->hashEnd = pNew->hashEnd; pOld->replica = pNew->replica; + pOld->isTsma = pNew->isTsma; memcpy(pOld->vnodeGid, pNew->vnodeGid, TSDB_MAX_REPLICA * sizeof(SVnodeGid)); return 0; } @@ -426,6 +429,25 @@ static int32_t mndGetAvailableDnode(SMnode *pMnode, SVgObj *pVgroup, SArray *pAr return 0; } +int32_t mndAllocSmaVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup) { + SArray *pArray = mndBuildDnodesArray(pMnode); + if (pArray == NULL) return -1; + + pVgroup->vgId = sdbGetMaxId(pMnode->pSdb, SDB_VGROUP); + pVgroup->isTsma = 1; + pVgroup->createdTime = taosGetTimestampMs(); + pVgroup->updateTime = pVgroup->createdTime; + pVgroup->version = 1; + memcpy(pVgroup->dbName, pDb->name, TSDB_DB_FNAME_LEN); + pVgroup->dbUid = pDb->uid; + pVgroup->replica = 1; + + if (mndGetAvailableDnode(pMnode, pVgroup, pArray) != 0) return -1; + + mInfo("db:%s, sma vgId:%d is alloced", pDb->name, pVgroup->vgId); + return 0; +} + int32_t mndAllocVgroup(SMnode 
*pMnode, SDbObj *pDb, SVgObj **ppVgroups) { int32_t code = -1; SArray *pArray = NULL; @@ -702,9 +724,12 @@ static int32_t mndRetrieveVgroups(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppendNULL(pColInfo, numOfRows); - pColInfo = taosArrayGet(pBlock->pDataBlock, cols); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppendNULL(pColInfo, numOfRows); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataAppend(pColInfo, numOfRows, (const char *)&pVgroup->isTsma, false); + numOfRows++; sdbRelease(pSdb, pVgroup); } diff --git a/source/dnode/mnode/impl/test/acct/CMakeLists.txt b/source/dnode/mnode/impl/test/acct/CMakeLists.txt index 40f8b0726e28446170a71bbbccde979376448fbb..d72292e34bd605ec91b16788fadd9f1ff1c68cc4 100644 --- a/source/dnode/mnode/impl/test/acct/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/acct/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME acctTest - COMMAND acctTest -) +if(NOT TD_WINDOWS) + add_test( + NAME acctTest + COMMAND acctTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/dnode/mnode/impl/test/acct/acct.cpp b/source/dnode/mnode/impl/test/acct/acct.cpp index 6dcb931ed5e59c0a98904792cf25dfe762248ee7..46a9a465ebe631665fa753c6de17569160a158d1 100644 --- a/source/dnode/mnode/impl/test/acct/acct.cpp +++ b/source/dnode/mnode/impl/test/acct/acct.cpp @@ -13,7 +13,7 @@ class MndTestAcct : public ::testing::Test { protected: - static void SetUpTestSuite() { test.Init("/tmp/acctTest", 9012); } + static void SetUpTestSuite() { test.Init(TD_TMP_DIR_PATH "acctTest", 9012); } static void TearDownTestSuite() { test.Cleanup(); } static Testbase test; diff --git a/source/dnode/mnode/impl/test/bnode/mbnode.cpp b/source/dnode/mnode/impl/test/bnode/mbnode.cpp index 316ac8cc3628e3b8cf747ab2e814c8dd6fcd50ff..c93e2142d02de318da323fa00ee7c04531d58874 100644 --- a/source/dnode/mnode/impl/test/bnode/mbnode.cpp +++ b/source/dnode/mnode/impl/test/bnode/mbnode.cpp @@ -18,11 +18,11 @@ class MndTestBnode : public ::testing::Test { public: static void SetUpTestSuite() { - test.Init("/tmp/mnode_test_bnode1", 9018); + test.Init(TD_TMP_DIR_PATH "mnode_test_bnode1", 9018); const char* fqdn = "localhost"; const char* firstEp = "localhost:9018"; - server2.Start("/tmp/mnode_test_bnode2", 9019); + server2.Start(TD_TMP_DIR_PATH "mnode_test_bnode2", 9019); taosMsleep(300); } diff --git a/source/dnode/mnode/impl/test/db/db.cpp b/source/dnode/mnode/impl/test/db/db.cpp index 545f9f22bb869dbc432e81620425905ed89f73f6..a1bab5d1d4786bdcfba5774f657264f46e6f471c 100644 --- a/source/dnode/mnode/impl/test/db/db.cpp +++ b/source/dnode/mnode/impl/test/db/db.cpp @@ -13,7 +13,7 @@ class MndTestDb : public ::testing::Test { protected: - static void SetUpTestSuite() { test.Init("/tmp/mnode_test_db", 9030); } + static void SetUpTestSuite() { test.Init(TD_TMP_DIR_PATH "mnode_test_db", 9030); } static void TearDownTestSuite() { test.Cleanup(); } static Testbase test; diff --git a/source/dnode/mnode/impl/test/dnode/mdnode.cpp b/source/dnode/mnode/impl/test/dnode/mdnode.cpp index e63536d4940a4779aa50a8947102eaf7c6904b34..0b42b2821988a4fe560d9d4660c8c70a3e9cbb4b 100644 --- a/source/dnode/mnode/impl/test/dnode/mdnode.cpp +++ b/source/dnode/mnode/impl/test/dnode/mdnode.cpp @@ -18,14 +18,14 @@ class MndTestDnode : public ::testing::Test { public: static void SetUpTestSuite() { - test.Init("/tmp/dnode_test_dnode1", 9023); + test.Init(TD_TMP_DIR_PATH "dnode_test_dnode1", 9023); const char* fqdn = "localhost"; 
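/* Editor's aside, written as a C comment so it reads in place: the test
 * changes around here swap hard-coded "/tmp/..." prefixes for the
 * TD_TMP_DIR_PATH macro. Assuming TD_TMP_DIR_PATH expands to a per-OS
 * string literal (e.g. "/tmp/" on Linux, a %TEMP%-based path on Windows;
 * this is an assumption, the definition is not in this patch), adjacent
 * string-literal concatenation builds the final path at compile time:
 *
 *   test.Init(TD_TMP_DIR_PATH "dnode_test_dnode1", 9023);
 *   // on Linux: test.Init("/tmp/" "dnode_test_dnode1", 9023)
 *   //        == test.Init("/tmp/dnode_test_dnode1", 9023)
 */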
const char* firstEp = "localhost:9023"; - // server2.Start("/tmp/dnode_test_dnode2", fqdn, 9024, firstEp); - // server3.Start("/tmp/dnode_test_dnode3", fqdn, 9025, firstEp); - // server4.Start("/tmp/dnode_test_dnode4", fqdn, 9026, firstEp); - // server5.Start("/tmp/dnode_test_dnode5", fqdn, 9027, firstEp); + // server2.Start(TD_TMP_DIR_PATH "dnode_test_dnode2", fqdn, 9024, firstEp); + // server3.Start(TD_TMP_DIR_PATH "dnode_test_dnode3", fqdn, 9025, firstEp); + // server4.Start(TD_TMP_DIR_PATH "dnode_test_dnode4", fqdn, 9026, firstEp); + // server5.Start(TD_TMP_DIR_PATH "dnode_test_dnode5", fqdn, 9027, firstEp); taosMsleep(300); } diff --git a/source/dnode/mnode/impl/test/func/CMakeLists.txt b/source/dnode/mnode/impl/test/func/CMakeLists.txt index ecb4f851be9d95a7c894d1e2ef2b3d9ce83067d3..2a8eb0a39d89275ae204e6405de2b774b4412619 100644 --- a/source/dnode/mnode/impl/test/func/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/func/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME funcTest - COMMAND funcTest -) +if(NOT TD_WINDOWS) + add_test( + NAME funcTest + COMMAND funcTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/dnode/mnode/impl/test/func/func.cpp b/source/dnode/mnode/impl/test/func/func.cpp index c8f832160b4addc852fcf3f610dcd8d608cfef29..2bebe7ef199c8e61d1204a611b84b66051002087 100644 --- a/source/dnode/mnode/impl/test/func/func.cpp +++ b/source/dnode/mnode/impl/test/func/func.cpp @@ -13,7 +13,7 @@ class MndTestFunc : public ::testing::Test { protected: - static void SetUpTestSuite() { test.Init("/tmp/mnode_test_func", 9038); } + static void SetUpTestSuite() { test.Init(TD_TMP_DIR_PATH "mnode_test_func", 9038); } static void TearDownTestSuite() { test.Cleanup(); } static Testbase test; diff --git a/source/dnode/mnode/impl/test/mnode/mnode.cpp b/source/dnode/mnode/impl/test/mnode/mnode.cpp index d953bdfdcba16c1987c2fb48980b2989b56bc400..1ed613c723ca504777929b8a6715e9d3b7594e06 100644 --- a/source/dnode/mnode/impl/test/mnode/mnode.cpp +++ b/source/dnode/mnode/impl/test/mnode/mnode.cpp @@ -18,11 +18,11 @@ class MndTestMnode : public ::testing::Test { public: static void SetUpTestSuite() { - test.Init("/tmp/mnode_test_mnode1", 9028); + test.Init(TD_TMP_DIR_PATH "mnode_test_mnode1", 9028); const char* fqdn = "localhost"; const char* firstEp = "localhost:9028"; - // server2.Start("/tmp/mnode_test_mnode2", fqdn, 9029, firstEp); + // server2.Start(TD_TMP_DIR_PATH "mnode_test_mnode2", fqdn, 9029, firstEp); taosMsleep(300); } diff --git a/source/dnode/mnode/impl/test/profile/CMakeLists.txt b/source/dnode/mnode/impl/test/profile/CMakeLists.txt index 8b811ebfed3a56ab139ecfc81f3556af2f9bb032..b6586192b2b4c6e428c2f00fddb11527a1747707 100644 --- a/source/dnode/mnode/impl/test/profile/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/profile/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME profileTest - COMMAND profileTest -) +if(NOT TD_WINDOWS) + add_test( + NAME profileTest + COMMAND profileTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/dnode/mnode/impl/test/profile/profile.cpp b/source/dnode/mnode/impl/test/profile/profile.cpp index 9c8e0298aa336ee5634480395a9e33ce3390d065..794374a91d51dfa9746028cebab6af3e394f2798 100644 --- a/source/dnode/mnode/impl/test/profile/profile.cpp +++ b/source/dnode/mnode/impl/test/profile/profile.cpp @@ -13,7 +13,7 @@ class MndTestProfile : public ::testing::Test { protected: - static void SetUpTestSuite() { test.Init("/tmp/mnode_test_profile", 9031); } + static void SetUpTestSuite() { 
test.Init(TD_TMP_DIR_PATH "mnode_test_profile", 9031); } static void TearDownTestSuite() { test.Cleanup(); } static Testbase test; diff --git a/source/dnode/mnode/impl/test/qnode/qnode.cpp b/source/dnode/mnode/impl/test/qnode/qnode.cpp index 87ba7caa4edf888057aad3d3b1ab1e157e54b5e4..57b38e55c1d392bda809b094b0f1bf918aae8888 100644 --- a/source/dnode/mnode/impl/test/qnode/qnode.cpp +++ b/source/dnode/mnode/impl/test/qnode/qnode.cpp @@ -18,11 +18,11 @@ class MndTestQnode : public ::testing::Test { public: static void SetUpTestSuite() { - test.Init("/tmp/mnode_test_qnode1", 9014); + test.Init(TD_TMP_DIR_PATH "mnode_test_qnode1", 9014); const char* fqdn = "localhost"; const char* firstEp = "localhost:9014"; - // server2.Start("/tmp/mnode_test_qnode2", fqdn, 9015, firstEp); + // server2.Start(TD_TMP_DIR_PATH "mnode_test_qnode2", fqdn, 9015, firstEp); taosMsleep(300); } diff --git a/source/dnode/mnode/impl/test/sdb/sdbTest.cpp b/source/dnode/mnode/impl/test/sdb/sdbTest.cpp index b93adf99305492fec346a1fb981aef7e4e55979f..43be55dd1de822d098475747a7b5b6452f379058 100644 --- a/source/dnode/mnode/impl/test/sdb/sdbTest.cpp +++ b/source/dnode/mnode/impl/test/sdb/sdbTest.cpp @@ -31,7 +31,7 @@ class MndTestSdb : public ::testing::Test { tsLogEmbedded = 1; tsAsyncLog = 0; - const char *path = "/tmp/td"; + const char *path = TD_TMP_DIR_PATH "td"; taosRemoveDir(path); taosMkDir(path); tstrncpy(tsLogDir, path, PATH_MAX); @@ -385,7 +385,7 @@ TEST_F(MndTestSdb, 01_Write_Str) { mnode.v100 = 100; mnode.v200 = 200; opt.pMnode = &mnode; - opt.path = "/tmp/mnode_test_sdb"; + opt.path = TD_TMP_DIR_PATH "mnode_test_sdb"; taosRemoveDir(opt.path); SSdbTable strTable1; @@ -492,10 +492,9 @@ TEST_F(MndTestSdb, 01_Write_Str) { ASSERT_EQ(sdbGetSize(pSdb, SDB_USER), 2); ASSERT_EQ(sdbGetMaxId(pSdb, SDB_USER), -1); - ASSERT_EQ(sdbGetTableVer(pSdb, SDB_USER), 2 ); - ASSERT_EQ(sdbUpdateVer(pSdb, 0), -1); - ASSERT_EQ(sdbUpdateVer(pSdb, 1), 0); - ASSERT_EQ(sdbUpdateVer(pSdb, -1), -1); + ASSERT_EQ(sdbGetTableVer(pSdb, SDB_USER), 2); + sdbSetApplyIndex(pSdb, -1); + ASSERT_EQ(sdbGetApplyIndex(pSdb), -1); ASSERT_EQ(mnode.insertTimes, 2); ASSERT_EQ(mnode.deleteTimes, 0); @@ -537,9 +536,6 @@ TEST_F(MndTestSdb, 01_Write_Str) { ASSERT_EQ(sdbGetSize(pSdb, SDB_USER), 3); ASSERT_EQ(sdbGetTableVer(pSdb, SDB_USER), 4); - ASSERT_EQ(sdbUpdateVer(pSdb, 0), -1); - ASSERT_EQ(sdbUpdateVer(pSdb, 1), 0); - ASSERT_EQ(sdbUpdateVer(pSdb, -1), -1); ASSERT_EQ(mnode.insertTimes, 3); ASSERT_EQ(mnode.deleteTimes, 0); @@ -704,8 +700,9 @@ TEST_F(MndTestSdb, 01_Write_Str) { } // write version - ASSERT_EQ(sdbUpdateVer(pSdb, 1), 0); - ASSERT_EQ(sdbUpdateVer(pSdb, 1), 1); + sdbSetApplyIndex(pSdb, 0); + sdbSetApplyIndex(pSdb, 1); + ASSERT_EQ(sdbGetApplyIndex(pSdb), 1); ASSERT_EQ(sdbWriteFile(pSdb), 0); ASSERT_EQ(sdbWriteFile(pSdb), 0); @@ -730,7 +727,7 @@ TEST_F(MndTestSdb, 01_Read_Str) { mnode.v100 = 100; mnode.v200 = 200; opt.pMnode = &mnode; - opt.path = "/tmp/mnode_test_sdb"; + opt.path = TD_TMP_DIR_PATH "mnode_test_sdb"; SSdbTable strTable1; memset(&strTable1, 0, sizeof(SSdbTable)); @@ -775,7 +772,7 @@ TEST_F(MndTestSdb, 01_Read_Str) { ASSERT_EQ(sdbGetSize(pSdb, SDB_USER), 2); ASSERT_EQ(sdbGetMaxId(pSdb, SDB_USER), -1); ASSERT_EQ(sdbGetTableVer(pSdb, SDB_USER), 5); - ASSERT_EQ(sdbUpdateVer(pSdb, 0), 1); + ASSERT_EQ(sdbGetApplyIndex(pSdb), 1); ASSERT_EQ(mnode.insertTimes, 4); ASSERT_EQ(mnode.deleteTimes, 0); @@ -898,7 +895,35 @@ TEST_F(MndTestSdb, 01_Read_Str) { ASSERT_EQ(code, TSDB_CODE_SDB_OBJ_CREATING); } + { + SSdbIter *pReader = NULL; + SSdbIter *pWritter = 
NULL; + void *pBuf = NULL; + int32_t len = 0; + int32_t code = 0; + + code = sdbStartRead(pSdb, &pReader); + ASSERT_EQ(code, 0); + code = sdbStartWrite(pSdb, &pWritter); + ASSERT_EQ(code, 0); + + while (sdbDoRead(pSdb, pReader, &pBuf, &len) == 0) { + if (pBuf != NULL && len != 0) { + sdbDoWrite(pSdb, pWritter, pBuf, len); + taosMemoryFree(pBuf); + } else { + break; + } + } + + sdbStopRead(pSdb, pReader); + sdbStopWrite(pSdb, pWritter, true); + } + + ASSERT_EQ(sdbGetSize(pSdb, SDB_CONSUMER), 1); + ASSERT_EQ(sdbGetTableVer(pSdb, SDB_CONSUMER), 4); + sdbCleanup(pSdb); - ASSERT_EQ(mnode.insertTimes, 5); - ASSERT_EQ(mnode.deleteTimes, 5); + ASSERT_EQ(mnode.insertTimes, 9); + ASSERT_EQ(mnode.deleteTimes, 9); } \ No newline at end of file diff --git a/source/dnode/mnode/impl/test/show/CMakeLists.txt b/source/dnode/mnode/impl/test/show/CMakeLists.txt index 69e93e7086147de77676ea02017a6ce5533acf42..9b4e21501ed478e527adfa69a5a2297e173876e1 100644 --- a/source/dnode/mnode/impl/test/show/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/show/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME showTest - COMMAND showTest -) +if(NOT TD_WINDOWS) + add_test( + NAME showTest + COMMAND showTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/dnode/mnode/impl/test/show/show.cpp b/source/dnode/mnode/impl/test/show/show.cpp index 5c431f65d3a5eb5663eb3a92005a4635e6f7f384..0de8c9dca8cc69ce46bc41d439571fa8c7ad046b 100644 --- a/source/dnode/mnode/impl/test/show/show.cpp +++ b/source/dnode/mnode/impl/test/show/show.cpp @@ -13,7 +13,7 @@ class MndTestShow : public ::testing::Test { protected: - static void SetUpTestSuite() { test.Init("/tmp/mnode_test_show", 9021); } + static void SetUpTestSuite() { test.Init(TD_TMP_DIR_PATH "mnode_test_show", 9021); } static void TearDownTestSuite() { test.Cleanup(); } static Testbase test; diff --git a/source/dnode/mnode/impl/test/sma/sma.cpp b/source/dnode/mnode/impl/test/sma/sma.cpp index 4dc4e0477947d647961acd1859720f1013d6da45..d795816f57b5e65a7bedaef6c14af6db84680703 100644 --- a/source/dnode/mnode/impl/test/sma/sma.cpp +++ b/source/dnode/mnode/impl/test/sma/sma.cpp @@ -13,7 +13,7 @@ class MndTestSma : public ::testing::Test { protected: - static void SetUpTestSuite() { test.Init("/tmp/mnode_test_sma", 9035); } + static void SetUpTestSuite() { test.Init(TD_TMP_DIR_PATH "mnode_test_sma", 9035); } static void TearDownTestSuite() { test.Cleanup(); } static Testbase test; diff --git a/source/dnode/mnode/impl/test/snode/snode.cpp b/source/dnode/mnode/impl/test/snode/snode.cpp index 0b1d3c38b25a204902e2b884c22006c89476dfb3..1828fbd570499107e3e3131631c35602bead983f 100644 --- a/source/dnode/mnode/impl/test/snode/snode.cpp +++ b/source/dnode/mnode/impl/test/snode/snode.cpp @@ -18,11 +18,11 @@ class MndTestSnode : public ::testing::Test { public: static void SetUpTestSuite() { - test.Init("/tmp/mnode_test_snode1", 9016); + test.Init(TD_TMP_DIR_PATH "mnode_test_snode1", 9016); const char* fqdn = "localhost"; const char* firstEp = "localhost:9016"; - // server2.Start("/tmp/mnode_test_snode2", fqdn, 9017, firstEp); + // server2.Start(TD_TMP_DIR_PATH "mnode_test_snode2", fqdn, 9017, firstEp); taosMsleep(300); } diff --git a/source/dnode/mnode/impl/test/stb/stb.cpp b/source/dnode/mnode/impl/test/stb/stb.cpp index b8873210ab995bde06f6c2baf17d14975bc591e1..56b8936cf44f5520d1e72dfce8f0877fa4be6684 100644 --- a/source/dnode/mnode/impl/test/stb/stb.cpp +++ b/source/dnode/mnode/impl/test/stb/stb.cpp @@ -13,7 +13,7 @@ class MndTestStb : public ::testing::Test { 
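/* Hedged aside on the stb test changes below: SMAlterStbReq appears to split
 * the old verInBlock field into independent tagVer/colVer schema versions,
 * which is why the helper now sets both:
 *
 *   req.tagVer = verInBlock;
 *   req.colVer = verInBlock;
 *
 * and why the expected metaRsp.tversion moves from 0 to 1: the tag schema is
 * presumably now versioned from 1, in step with sversion. This reading is
 * inferred from the hunks alone, not from the message definitions.
 */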
protected: - static void SetUpTestSuite() { test.Init("/tmp/mnode_test_stb", 9034); } + static void SetUpTestSuite() { test.Init(TD_TMP_DIR_PATH "mnode_test_stb", 9034); } static void TearDownTestSuite() { test.Cleanup(); } static Testbase test; @@ -277,7 +277,8 @@ void* MndTestStb::BuildAlterStbUpdateColumnBytesReq(const char* stbname, const c req.numOfFields = 1; req.pFields = taosArrayInit(1, sizeof(SField)); req.alterType = TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES; - req.verInBlock = verInBlock; + req.tagVer = verInBlock; + req.colVer = verInBlock; SField field = {0}; field.bytes = bytes; @@ -343,7 +344,7 @@ TEST_F(MndTestStb, 01_Create_Show_Meta_Drop_Restart_Stb) { EXPECT_EQ(metaRsp.precision, TSDB_TIME_PRECISION_MILLI); EXPECT_EQ(metaRsp.tableType, TSDB_SUPER_TABLE); EXPECT_EQ(metaRsp.sversion, 1); - EXPECT_EQ(metaRsp.tversion, 0); + EXPECT_EQ(metaRsp.tversion, 1); EXPECT_GT(metaRsp.suid, 0); EXPECT_GT(metaRsp.tuid, 0); EXPECT_EQ(metaRsp.vgId, 0); diff --git a/source/dnode/mnode/impl/test/topic/topic.cpp b/source/dnode/mnode/impl/test/topic/topic.cpp index eccc1b99d3f3dc8189712776e4ee852c6169cdfa..433a0ab5cc100d0fc96ac575a128f350df5e8bf3 100644 --- a/source/dnode/mnode/impl/test/topic/topic.cpp +++ b/source/dnode/mnode/impl/test/topic/topic.cpp @@ -13,7 +13,7 @@ class MndTestTopic : public ::testing::Test { protected: - static void SetUpTestSuite() { test.Init("/tmp/mnode_test_topic", 9039); } + static void SetUpTestSuite() { test.Init(TD_TMP_DIR_PATH "mnode_test_topic", 9039); } static void TearDownTestSuite() { test.Cleanup(); } static Testbase test; diff --git a/source/dnode/mnode/impl/test/trans/CMakeLists.txt b/source/dnode/mnode/impl/test/trans/CMakeLists.txt index 55fc3abbc26feec948aeffcd8168259b22c07068..22ff85563fab6119a0d35b36afeb1cd7aa450996 100644 --- a/source/dnode/mnode/impl/test/trans/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/trans/CMakeLists.txt @@ -31,7 +31,7 @@ target_include_directories( PUBLIC "${TD_SOURCE_DIR}/include/dnode/mnode" PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../../inc" ) -add_test( - NAME transTest2 - COMMAND transTest2 -) +#add_test( +# NAME transTest2 +# COMMAND transTest2 +#) diff --git a/source/dnode/mnode/impl/test/trans/trans1.cpp b/source/dnode/mnode/impl/test/trans/trans1.cpp index 80109a39b24c452f1a656b4bf172c8e2ad38acd5..5a470fc900f418c429befe12d6cfae643522a2e3 100644 --- a/source/dnode/mnode/impl/test/trans/trans1.cpp +++ b/source/dnode/mnode/impl/test/trans/trans1.cpp @@ -14,10 +14,10 @@ class MndTestTrans1 : public ::testing::Test { protected: static void SetUpTestSuite() { - test.Init("/tmp/mnode_test_trans1", 9013); + test.Init(TD_TMP_DIR_PATH "mnode_test_trans1", 9013); const char* fqdn = "localhost"; const char* firstEp = "localhost:9013"; - // server2.Start("/tmp/mnode_test_trans2", fqdn, 9020, firstEp); + // server2.Start(TD_TMP_DIR_PATH "mnode_test_trans2", fqdn, 9020, firstEp); } static void TearDownTestSuite() { @@ -26,7 +26,7 @@ class MndTestTrans1 : public ::testing::Test { } static void KillThenRestartServer() { - char file[PATH_MAX] = "/tmp/mnode_test_trans1/mnode/data/sdb.data"; + char file[PATH_MAX] = TD_TMP_DIR_PATH "mnode_test_trans1/mnode/data/sdb.data"; TdFilePtr pFile = taosOpenFile(file, TD_FILE_READ); int32_t size = 3 * 1024 * 1024; void* buffer = taosMemoryMalloc(size); diff --git a/source/dnode/mnode/impl/test/trans/trans2.cpp b/source/dnode/mnode/impl/test/trans/trans2.cpp index c4ed48fe60f069b05ca445c271a54d53bff810da..cfcfc2490e022092386b64f859befe4b1b922c80 100644 --- 
a/source/dnode/mnode/impl/test/trans/trans2.cpp +++ b/source/dnode/mnode/impl/test/trans/trans2.cpp @@ -23,6 +23,11 @@ int32_t sendReq(const SEpSet *pEpSet, SRpcMsg *pMsg) { return -1; } +int32_t putToQueue(void *pMgmt, SRpcMsg *pMsg) { + terrno = TSDB_CODE_INVALID_PTR; + return -1; +} + class MndTestTrans2 : public ::testing::Test { protected: static void InitLog() { @@ -41,7 +46,7 @@ class MndTestTrans2 : public ::testing::Test { tsLogEmbedded = 1; tsAsyncLog = 0; - const char *logpath = "/tmp/td"; + const char *logpath = TD_TMP_DIR_PATH "td"; taosRemoveDir(logpath); taosMkDir(logpath); tstrncpy(tsLogDir, logpath, PATH_MAX); @@ -55,6 +60,9 @@ class MndTestTrans2 : public ::testing::Test { msgCb.reportStartupFp = reportStartup; msgCb.sendReqFp = sendReq; msgCb.sendRspFp = sendRsp; + msgCb.queueFps[SYNC_QUEUE] = putToQueue; + msgCb.queueFps[WRITE_QUEUE] = putToQueue; + msgCb.queueFps[READ_QUEUE] = putToQueue; msgCb.mgmt = (SMgmtWrapper *)(&msgCb); // hack tmsgSetDefault(&msgCb); @@ -68,7 +76,7 @@ class MndTestTrans2 : public ::testing::Test { tsTransPullupInterval = 1; - const char *mnodepath = "/tmp/mnode_test_trans"; + const char *mnodepath = TD_TMP_DIR_PATH "mnode_test_trans"; taosRemoveDir(mnodepath); pMnode = mndOpen(mnodepath, &opt); mndStart(pMnode); @@ -77,6 +85,7 @@ class MndTestTrans2 : public ::testing::Test { static void SetUpTestSuite() { InitLog(); walInit(); + syncInit(); InitMnode(); } @@ -114,7 +123,7 @@ class MndTestTrans2 : public ::testing::Test { sdbSetRawStatus(pUndoRaw, SDB_STATUS_DROPPED); char *param = strdup("====> test log <====="); - mndTransSetCb(pTrans, TEST_TRANS_START_FUNC, TEST_TRANS_STOP_FUNC, param, strlen(param) + 1); + mndTransSetCb(pTrans, TRANS_START_FUNC_TEST, TRANS_STOP_FUNC_TEST, param, strlen(param) + 1); if (pDb != NULL) { mndTransSetDbInfo(pTrans, pDb); @@ -147,7 +156,7 @@ class MndTestTrans2 : public ::testing::Test { sdbSetRawStatus(pUndoRaw, SDB_STATUS_DROPPED); char *param = strdup("====> test action <====="); - mndTransSetCb(pTrans, TEST_TRANS_START_FUNC, TEST_TRANS_STOP_FUNC, param, strlen(param) + 1); + mndTransSetCb(pTrans, TRANS_START_FUNC_TEST, TRANS_STOP_FUNC_TEST, param, strlen(param) + 1); { STransAction action = {0}; @@ -219,7 +228,7 @@ class MndTestTrans2 : public ::testing::Test { sdbSetRawStatus(pUndoRaw, SDB_STATUS_DROPPED); char *param = strdup("====> test log <====="); - mndTransSetCb(pTrans, TEST_TRANS_START_FUNC, TEST_TRANS_STOP_FUNC, param, strlen(param) + 1); + mndTransSetCb(pTrans, TRANS_START_FUNC_TEST, TRANS_STOP_FUNC_TEST, param, strlen(param) + 1); int32_t code = mndTransPrepare(pMnode, pTrans); mndTransDrop(pTrans); diff --git a/source/dnode/mnode/impl/test/user/user.cpp b/source/dnode/mnode/impl/test/user/user.cpp index 9e4bd79274a9da6810d1ab7f03edcd170d348183..6aa28a9007e8bc8e7c45621eb7e29280da964823 100644 --- a/source/dnode/mnode/impl/test/user/user.cpp +++ b/source/dnode/mnode/impl/test/user/user.cpp @@ -13,7 +13,7 @@ class MndTestUser : public ::testing::Test { protected: - static void SetUpTestSuite() { test.Init("/tmp/mnode_test_user", 9011); } + static void SetUpTestSuite() { test.Init(TD_TMP_DIR_PATH "mnode_test_user", 9011); } static void TearDownTestSuite() { test.Cleanup(); } static Testbase test; diff --git a/source/dnode/mnode/sdb/CMakeLists.txt b/source/dnode/mnode/sdb/CMakeLists.txt index e2ebed7a788c58cb6bbe2ba384eeabeb5cf3f2f0..2001a70da217d67e8a3b63137f40fbce9eaf6192 100644 --- a/source/dnode/mnode/sdb/CMakeLists.txt +++ b/source/dnode/mnode/sdb/CMakeLists.txt @@ -2,8 +2,7 @@ 
aux_source_directory(src MNODE_SRC) add_library(sdb STATIC ${MNODE_SRC}) target_include_directories( sdb - PUBLIC "${TD_SOURCE_DIR}/include/dnode/mnode/sdb" - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" + PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) target_link_libraries( sdb os common util wal diff --git a/include/dnode/mnode/sdb/sdb.h b/source/dnode/mnode/sdb/inc/sdb.h similarity index 81% rename from include/dnode/mnode/sdb/sdb.h rename to source/dnode/mnode/sdb/inc/sdb.h index 2abe0e5c737c8dd52c92cc0e34a052f44155e298..c66b47a24b13f0c9efd55dc965743416737177ea 100644 --- a/include/dnode/mnode/sdb/sdb.h +++ b/source/dnode/mnode/sdb/inc/sdb.h @@ -27,6 +27,15 @@ extern "C" { #endif +// clang-format off +#define mFatal(...) { if (mDebugFlag & DEBUG_FATAL) { taosPrintLog("MND FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} +#define mError(...) { if (mDebugFlag & DEBUG_ERROR) { taosPrintLog("MND ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} +#define mWarn(...) { if (mDebugFlag & DEBUG_WARN) { taosPrintLog("MND WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} +#define mInfo(...) { if (mDebugFlag & DEBUG_INFO) { taosPrintLog("MND ", DEBUG_INFO, 255, __VA_ARGS__); }} +#define mDebug(...) { if (mDebugFlag & DEBUG_DEBUG) { taosPrintLog("MND ", DEBUG_DEBUG, mDebugFlag, __VA_ARGS__); }} +#define mTrace(...) { if (mDebugFlag & DEBUG_TRACE) { taosPrintLog("MND ", DEBUG_TRACE, mDebugFlag, __VA_ARGS__); }} +// clang-format on + #define SDB_GET_VAL(pData, dataPos, val, pos, func, type) \ { \ if (func(pRaw, dataPos, val) != 0) { \ @@ -44,12 +53,9 @@ extern "C" { } #define SDB_GET_INT64(pData, dataPos, val, pos) SDB_GET_VAL(pData, dataPos, val, pos, sdbGetRawInt64, int64_t) - #define SDB_GET_INT32(pData, dataPos, val, pos) SDB_GET_VAL(pData, dataPos, val, pos, sdbGetRawInt32, int32_t) - #define SDB_GET_INT16(pData, dataPos, val, pos) SDB_GET_VAL(pData, dataPos, val, pos, sdbGetRawInt16, int16_t) - -#define SDB_GET_INT8(pData, dataPos, val, pos) SDB_GET_VAL(pData, dataPos, val, pos, sdbGetRawInt8, int8_t) +#define SDB_GET_INT8(pData, dataPos, val, pos) SDB_GET_VAL(pData, dataPos, val, pos, sdbGetRawInt8, int8_t) #define SDB_GET_RESERVE(pRaw, dataPos, valLen, pos) \ { \ @@ -66,12 +72,9 @@ extern "C" { } #define SDB_SET_INT64(pRaw, dataPos, val, pos) SDB_SET_VAL(pRaw, dataPos, val, pos, sdbSetRawInt64, int64_t) - #define SDB_SET_INT32(pRaw, dataPos, val, pos) SDB_SET_VAL(pRaw, dataPos, val, pos, sdbSetRawInt32, int32_t) - #define SDB_SET_INT16(pRaw, dataPos, val, pos) SDB_SET_VAL(pRaw, dataPos, val, pos, sdbSetRawInt16, int16_t) - -#define SDB_SET_INT8(pRaw, dataPos, val, pos) SDB_SET_VAL(pRaw, dataPos, val, pos, sdbSetRawInt8, int8_t) +#define SDB_SET_INT8(pRaw, dataPos, val, pos) SDB_SET_VAL(pRaw, dataPos, val, pos, sdbSetRawInt8, int8_t) #define SDB_SET_BINARY(pRaw, dataPos, val, valLen, pos) \ { \ @@ -95,8 +98,16 @@ extern "C" { } typedef struct SMnode SMnode; +typedef struct SSdb SSdb; typedef struct SSdbRaw SSdbRaw; typedef struct SSdbRow SSdbRow; +typedef int32_t (*SdbInsertFp)(SSdb *pSdb, void *pObj); +typedef int32_t (*SdbUpdateFp)(SSdb *pSdb, void *pSrcObj, void *pDstObj); +typedef int32_t (*SdbDeleteFp)(SSdb *pSdb, void *pObj, bool callFunc); +typedef int32_t (*SdbDeployFp)(SMnode *pMnode); +typedef SSdbRow *(*SdbDecodeFp)(SSdbRaw *pRaw); +typedef SSdbRaw *(*SdbEncodeFp)(void *pObj); +typedef bool (*sdbTraverseFp)(SMnode *pMnode, void *pObj, void *p1, void *p2, void *p3); typedef enum { SDB_KEY_BINARY = 1, @@ -136,14 +147,48 @@ typedef enum { SDB_MAX = 20 } ESdbType; -typedef struct SSdb SSdb; -typedef int32_t 
(*SdbInsertFp)(SSdb *pSdb, void *pObj); -typedef int32_t (*SdbUpdateFp)(SSdb *pSdb, void *pSrcObj, void *pDstObj); -typedef int32_t (*SdbDeleteFp)(SSdb *pSdb, void *pObj, bool callFunc); -typedef int32_t (*SdbDeployFp)(SMnode *pMnode); -typedef SSdbRow *(*SdbDecodeFp)(SSdbRaw *pRaw); -typedef SSdbRaw *(*SdbEncodeFp)(void *pObj); -typedef bool (*sdbTraverseFp)(SMnode *pMnode, void *pObj, void *p1, void *p2, void *p3); +typedef struct SSdbRaw { + int8_t type; + int8_t status; + int8_t sver; + int8_t reserved; + int32_t dataLen; + char pData[]; +} SSdbRaw; + +typedef struct SSdbRow { + ESdbType type; + ESdbStatus status; + int32_t refCount; + char pObj[]; +} SSdbRow; + +typedef struct SSdb { + SMnode *pMnode; + char *currDir; + char *tmpDir; + int64_t lastCommitVer; + int64_t curVer; + int64_t curTerm; + int64_t tableVer[SDB_MAX]; + int64_t maxId[SDB_MAX]; + EKeyType keyTypes[SDB_MAX]; + SHashObj *hashObjs[SDB_MAX]; + TdThreadRwlock locks[SDB_MAX]; + SdbInsertFp insertFps[SDB_MAX]; + SdbUpdateFp updateFps[SDB_MAX]; + SdbDeleteFp deleteFps[SDB_MAX]; + SdbDeployFp deployFps[SDB_MAX]; + SdbEncodeFp encodeFps[SDB_MAX]; + SdbDecodeFp decodeFps[SDB_MAX]; + TdThreadMutex filelock; +} SSdb; + +typedef struct SSdbIter { + TdFilePtr file; + int64_t total; + char *name; +} SSdbIter; typedef struct { ESdbType sdbType; @@ -304,13 +349,16 @@ int32_t sdbGetMaxId(SSdb *pSdb, ESdbType type); int64_t sdbGetTableVer(SSdb *pSdb, ESdbType type); /** - * @brief Update the version of sdb + * @brief Update the apply index of sdb * * @param pSdb The sdb object. - * @param val The update value of the version. - * @return int32_t The current version of sdb + * @param index The update value of the apply index. + * @return int64_t The current apply index of sdb */ -int64_t sdbUpdateVer(SSdb *pSdb, int32_t val); +void sdbSetApplyIndex(SSdb *pSdb, int64_t index); +int64_t sdbGetApplyIndex(SSdb *pSdb); +void sdbSetApplyTerm(SSdb *pSdb, int64_t term); +int64_t sdbGetApplyTerm(SSdb *pSdb); SSdbRaw *sdbAllocRaw(ESdbType type, int8_t sver, int32_t dataLen); void sdbFreeRaw(SSdbRaw *pRaw); @@ -331,26 +379,19 @@ int32_t sdbGetRawTotalSize(SSdbRaw *pRaw); SSdbRow *sdbAllocRow(int32_t objSize); void *sdbGetRowObj(SSdbRow *pRow); +void sdbFreeRow(SSdb *pSdb, SSdbRow *pRow, bool callFunc); -typedef struct SSdb { - SMnode *pMnode; - char *currDir; - char *syncDir; - char *tmpDir; - int64_t lastCommitVer; - int64_t curVer; - int64_t tableVer[SDB_MAX]; - int64_t maxId[SDB_MAX]; - EKeyType keyTypes[SDB_MAX]; - SHashObj *hashObjs[SDB_MAX]; - TdThreadRwlock locks[SDB_MAX]; - SdbInsertFp insertFps[SDB_MAX]; - SdbUpdateFp updateFps[SDB_MAX]; - SdbDeleteFp deleteFps[SDB_MAX]; - SdbDeployFp deployFps[SDB_MAX]; - SdbEncodeFp encodeFps[SDB_MAX]; - SdbDecodeFp decodeFps[SDB_MAX]; -} SSdb; +int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter); +int32_t sdbStopRead(SSdb *pSdb, SSdbIter *pIter); +int32_t sdbDoRead(SSdb *pSdb, SSdbIter *pIter, void **ppBuf, int32_t *len); + +int32_t sdbStartWrite(SSdb *pSdb, SSdbIter **ppIter); +int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply); +int32_t sdbDoWrite(SSdb *pSdb, SSdbIter *pIter, void *pBuf, int32_t len); + +const char *sdbTableName(ESdbType type); +void sdbPrintOper(SSdb *pSdb, SSdbRow *pRow, const char *oper); +int32_t sdbGetIdFromRaw(SSdb *pSdb, SSdbRaw *pRaw); #ifdef __cplusplus } diff --git a/source/dnode/mnode/sdb/inc/sdbInt.h b/source/dnode/mnode/sdb/inc/sdbInt.h deleted file mode 100644 index c49d6e8fb287619d9503282dd2e164ed432ce823..0000000000000000000000000000000000000000 --- 
a/source/dnode/mnode/sdb/inc/sdbInt.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#ifndef _TD_SDB_INT_H_ -#define _TD_SDB_INT_H_ - -#include "os.h" - -#include "sdb.h" - -#ifdef __cplusplus -extern "C" { -#endif - -// clang-format off -#define mFatal(...) { if (mDebugFlag & DEBUG_FATAL) { taosPrintLog("MND FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} -#define mError(...) { if (mDebugFlag & DEBUG_ERROR) { taosPrintLog("MND ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} -#define mWarn(...) { if (mDebugFlag & DEBUG_WARN) { taosPrintLog("MND WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} -#define mInfo(...) { if (mDebugFlag & DEBUG_INFO) { taosPrintLog("MND ", DEBUG_INFO, 255, __VA_ARGS__); }} -#define mDebug(...) { if (mDebugFlag & DEBUG_DEBUG) { taosPrintLog("MND ", DEBUG_DEBUG, mDebugFlag, __VA_ARGS__); }} -#define mTrace(...) { if (mDebugFlag & DEBUG_TRACE) { taosPrintLog("MND ", DEBUG_TRACE, mDebugFlag, __VA_ARGS__); }} -// clang-format on - -typedef struct SSdbRaw { - int8_t type; - int8_t status; - int8_t sver; - int8_t reserved; - int32_t dataLen; - char pData[]; -} SSdbRaw; - -typedef struct SSdbRow { - ESdbType type; - ESdbStatus status; - int32_t refCount; - char pObj[]; -} SSdbRow; - -const char *sdbTableName(ESdbType type); -void sdbPrintOper(SSdb *pSdb, SSdbRow *pRow, const char *oper); - -void sdbFreeRow(SSdb *pSdb, SSdbRow *pRow, bool callFunc); - -#ifdef __cplusplus -} -#endif - -#endif /*_TD_SDB_INT_H_*/ diff --git a/source/dnode/mnode/sdb/src/sdb.c b/source/dnode/mnode/sdb/src/sdb.c index 1f11a77e6c7575a8f602bb4720b0445b5c5c0372..485b729deb52ffcdf4c5b76c1999124a5157f5b2 100644 --- a/source/dnode/mnode/sdb/src/sdb.c +++ b/source/dnode/mnode/sdb/src/sdb.c @@ -14,7 +14,7 @@ */ #define _DEFAULT_SOURCE -#include "sdbInt.h" +#include "sdb.h" static int32_t sdbCreateDir(SSdb *pSdb); @@ -31,11 +31,9 @@ SSdb *sdbInit(SSdbOpt *pOption) { char path[PATH_MAX + 100] = {0}; snprintf(path, sizeof(path), "%s%sdata", pOption->path, TD_DIRSEP); pSdb->currDir = strdup(path); - snprintf(path, sizeof(path), "%s%ssync", pOption->path, TD_DIRSEP); - pSdb->syncDir = strdup(path); snprintf(path, sizeof(path), "%s%stmp", pOption->path, TD_DIRSEP); pSdb->tmpDir = strdup(path); - if (pSdb->currDir == NULL || pSdb->currDir == NULL || pSdb->currDir == NULL) { + if (pSdb->currDir == NULL || pSdb->tmpDir == NULL) { sdbCleanup(pSdb); terrno = TSDB_CODE_OUT_OF_MEMORY; mError("failed to init sdb since %s", terrstr()); @@ -55,8 +53,10 @@ SSdb *sdbInit(SSdbOpt *pOption) { } pSdb->curVer = -1; + pSdb->curTerm = -1; pSdb->lastCommitVer = -1; pSdb->pMnode = pOption->pMnode; + taosThreadMutexInit(&pSdb->filelock, NULL); mDebug("sdb init successfully"); return pSdb; } @@ -70,11 +70,8 @@ void sdbCleanup(SSdb *pSdb) { taosMemoryFreeClear(pSdb->currDir); } - if (pSdb->syncDir != NULL) { - taosMemoryFreeClear(pSdb->syncDir); - } - if (pSdb->tmpDir != NULL) { + taosRemoveDir(pSdb->tmpDir); taosMemoryFreeClear(pSdb->tmpDir); } @@ -105,6 +102,7 @@ void 
sdbCleanup(SSdb *pSdb) { mDebug("sdb table:%s is cleaned up", sdbTableName(i)); } + taosThreadMutexDestroy(&pSdb->filelock); taosMemoryFree(pSdb); mDebug("sdb is cleaned up"); } @@ -149,12 +147,6 @@ static int32_t sdbCreateDir(SSdb *pSdb) { return -1; } - if (taosMkDir(pSdb->syncDir) != 0) { - terrno = TAOS_SYSTEM_ERROR(errno); - mError("failed to create dir:%s since %s", pSdb->syncDir, terrstr()); - return -1; - } - if (taosMkDir(pSdb->tmpDir) != 0) { terrno = TAOS_SYSTEM_ERROR(errno); mError("failed to create dir:%s since %s", pSdb->tmpDir, terrstr()); @@ -164,4 +156,10 @@ static int32_t sdbCreateDir(SSdb *pSdb) { return 0; } -int64_t sdbUpdateVer(SSdb *pSdb, int32_t val) { return atomic_add_fetch_64(&pSdb->curVer, val); } \ No newline at end of file +void sdbSetApplyIndex(SSdb *pSdb, int64_t index) { pSdb->curVer = index; } + +int64_t sdbGetApplyIndex(SSdb *pSdb) { return pSdb->curVer; } + +void sdbSetApplyTerm(SSdb *pSdb, int64_t term) { pSdb->curTerm = term; } + +int64_t sdbGetApplyTerm(SSdb *pSdb) { return pSdb->curTerm; } diff --git a/source/dnode/mnode/sdb/src/sdbFile.c b/source/dnode/mnode/sdb/src/sdbFile.c index a391ea8d03c6e2119fb21e0d0178ee9096883d48..834e7a00c8c58638a9ac51ec498f87d66abe2b1e 100644 --- a/source/dnode/mnode/sdb/src/sdbFile.c +++ b/source/dnode/mnode/sdb/src/sdbFile.c @@ -14,7 +14,7 @@ */ #define _DEFAULT_SOURCE -#include "sdbInt.h" +#include "sdb.h" #include "tchecksum.h" #include "wal.h" @@ -22,13 +22,14 @@ #define SDB_RESERVE_SIZE 512 #define SDB_FILE_VER 1 -static int32_t sdbRunDeployFp(SSdb *pSdb) { +static int32_t sdbDeployData(SSdb *pSdb) { mDebug("start to deploy sdb"); for (int32_t i = SDB_MAX - 1; i >= 0; --i) { SdbDeployFp fp = pSdb->deployFps[i]; if (fp == NULL) continue; + mDebug("start to deploy sdb:%s", sdbTableName(i)); if ((*fp)(pSdb->pMnode) != 0) { mError("failed to deploy sdb:%s since %s", sdbTableName(i), terrstr()); return -1; @@ -39,6 +40,39 @@ return 0; } +static void sdbResetData(SSdb *pSdb) { + mDebug("start to reset sdb"); + + for (ESdbType i = 0; i < SDB_MAX; ++i) { + SHashObj *hash = pSdb->hashObjs[i]; + if (hash == NULL) continue; + + SSdbRow **ppRow = taosHashIterate(hash, NULL); + while (ppRow != NULL) { + SSdbRow *pRow = *ppRow; + if (pRow != NULL) { + sdbFreeRow(pSdb, pRow, true); + } + ppRow = taosHashIterate(hash, ppRow); + } + } + + for (ESdbType i = 0; i < SDB_MAX; ++i) { + SHashObj *hash = pSdb->hashObjs[i]; + if (hash == NULL) continue; + + taosHashClear(pSdb->hashObjs[i]); + pSdb->tableVer[i] = 0; + pSdb->maxId[i] = 0; + mDebug("sdb:%s is reset", sdbTableName(i)); + } + + pSdb->curVer = -1; + pSdb->curTerm = -1; + pSdb->lastCommitVer = -1; + mDebug("sdb reset successfully"); +} + static int32_t sdbReadFileHead(SSdb *pSdb, TdFilePtr pFile) { int64_t sver = 0; int32_t ret = taosReadFile(pFile, &sver, sizeof(int64_t)); @@ -65,6 +99,16 @@ static int32_t sdbReadFileHead(SSdb *pSdb, TdFilePtr pFile) { return -1; } + ret = taosReadFile(pFile, &pSdb->curTerm, sizeof(int64_t)); + if (ret < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + if (ret != sizeof(int64_t)) { + terrno = TSDB_CODE_FILE_CORRUPTED; + return -1; + } + for (int32_t i = 0; i < SDB_TABLE_SIZE; ++i) { int64_t maxId = 0; ret = taosReadFile(pFile, &maxId, sizeof(int64_t)); @@ -123,6 +167,11 @@ static int32_t sdbWriteFileHead(SSdb *pSdb, TdFilePtr pFile) { return -1; } + if (taosWriteFile(pFile, &pSdb->curTerm, sizeof(int64_t)) != sizeof(int64_t)) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + 
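/* Hedged sketch of the sdb.data file head after this change, with field order
 * inferred from the read/write code around this hunk (the apply-term field is
 * the addition; anything not visible in the hunks is an assumption):
 *
 *   int64_t sver;                  // SDB_FILE_VER
 *   int64_t curVer;                // apply index (assumed, read before curTerm)
 *   int64_t curTerm;               // apply term, new in this patch
 *   int64_t maxId[SDB_TABLE_SIZE]; // per-table max id
 *   ...                            // per-table versions, reserved bytes
 *
 * A pre-patch data file is 8 bytes short of this layout, so reading it should
 * fail with TSDB_CODE_FILE_CORRUPTED rather than load skewed fields.
 */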
for (int32_t i = 0; i < SDB_TABLE_SIZE; ++i) { int64_t maxId = 0; if (i < SDB_MAX) { @@ -154,11 +203,15 @@ static int32_t sdbWriteFileHead(SSdb *pSdb, TdFilePtr pFile) { return 0; } -int32_t sdbReadFile(SSdb *pSdb) { +static int32_t sdbReadFileImp(SSdb *pSdb) { int64_t offset = 0; int32_t code = 0; int32_t readLen = 0; int64_t ret = 0; + char file[PATH_MAX] = {0}; + + snprintf(file, sizeof(file), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); + mDebug("start to read file:%s", file); SSdbRaw *pRaw = taosMemoryMalloc(WAL_MAX_SIZE + 100); if (pRaw == NULL) { @@ -167,10 +220,6 @@ int32_t sdbReadFile(SSdb *pSdb) { return -1; } - char file[PATH_MAX] = {0}; - snprintf(file, sizeof(file), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); - mDebug("start to read file:%s", file); - TdFilePtr pFile = taosOpenFile(file, TD_FILE_READ); if (pFile == NULL) { taosMemoryFree(pRaw); @@ -181,7 +230,6 @@ int32_t sdbReadFile(SSdb *pSdb) { if (sdbReadFileHead(pSdb, pFile) != 0) { mError("failed to read file:%s head since %s", file, terrstr()); - pSdb->curVer = -1; taosMemoryFree(pRaw); taosCloseFile(&pFile); return -1; @@ -248,6 +296,20 @@ _OVER: return code; } +int32_t sdbReadFile(SSdb *pSdb) { + taosThreadMutexLock(&pSdb->filelock); + + sdbResetData(pSdb); + int32_t code = sdbReadFileImp(pSdb); + if (code != 0) { + mError("failed to read sdb since %s", terrstr()); + sdbResetData(pSdb); + } + + taosThreadMutexUnlock(&pSdb->filelock); + return code; +} + static int32_t sdbWriteFileImp(SSdb *pSdb) { int32_t code = 0; @@ -256,8 +318,8 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) { char curfile[PATH_MAX] = {0}; snprintf(curfile, sizeof(curfile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); - mDebug("start to write file:%s, current ver:%" PRId64 ", commit ver:%" PRId64, curfile, pSdb->curVer, - pSdb->lastCommitVer); + mDebug("start to write file:%s, current ver:%" PRId64 " term:%" PRId64 ", commit ver:%" PRId64, curfile, pSdb->curVer, + pSdb->curTerm, pSdb->lastCommitVer); TdFilePtr pFile = taosOpenFile(tmpfile, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); if (pFile == NULL) { @@ -350,7 +412,7 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) { mError("failed to write file:%s since %s", curfile, tstrerror(code)); } else { pSdb->lastCommitVer = pSdb->curVer; - mDebug("write file:%s successfully, ver:%" PRId64, curfile, pSdb->lastCommitVer); + mDebug("write file:%s successfully, ver:%" PRId64 " term:%" PRId64, curfile, pSdb->lastCommitVer, pSdb->curTerm); } terrno = code; @@ -362,17 +424,187 @@ int32_t sdbWriteFile(SSdb *pSdb) { return 0; } - return sdbWriteFileImp(pSdb); + taosThreadMutexLock(&pSdb->filelock); + int32_t code = sdbWriteFileImp(pSdb); + if (code != 0) { + mError("failed to write sdb since %s", terrstr()); + } + taosThreadMutexUnlock(&pSdb->filelock); + return code; } int32_t sdbDeploy(SSdb *pSdb) { - if (sdbRunDeployFp(pSdb) != 0) { + if (sdbDeployData(pSdb) != 0) { + return -1; + } + + if (sdbWriteFile(pSdb) != 0) { + return -1; + } + + return 0; +} + +static SSdbIter *sdbCreateIter(SSdb *pSdb) { + SSdbIter *pIter = taosMemoryCalloc(1, sizeof(SSdbIter)); + if (pIter == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + + char name[PATH_MAX + 100] = {0}; + snprintf(name, sizeof(name), "%s%ssdb.data.%" PRIu64, pSdb->tmpDir, TD_DIRSEP, (uint64_t)pIter); + pIter->name = strdup(name); + if (pIter->name == NULL) { + taosMemoryFree(pIter); + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + + return pIter; +} + +static void sdbCloseIter(SSdbIter *pIter) { + if (pIter == NULL) return; + + if 
(pIter->file != NULL) { + taosCloseFile(&pIter->file); + pIter->file = NULL; + } + + if (pIter->name != NULL) { + taosRemoveFile(pIter->name); + taosMemoryFree(pIter->name); + pIter->name = NULL; + } + + mInfo("sdbiter:%p, is closed, total:%" PRId64, pIter, pIter->total); + taosMemoryFree(pIter); +} + +int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter) { + SSdbIter *pIter = sdbCreateIter(pSdb); + if (pIter == NULL) return -1; + + char datafile[PATH_MAX] = {0}; + snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); + + taosThreadMutexLock(&pSdb->filelock); + if (taosCopyFile(datafile, pIter->name) < 0) { + taosThreadMutexUnlock(&pSdb->filelock); + terrno = TAOS_SYSTEM_ERROR(errno); + mError("failed to copy file %s to %s since %s", datafile, pIter->name, terrstr()); + sdbCloseIter(pIter); return -1; } + taosThreadMutexUnlock(&pSdb->filelock); - if (sdbWriteFileImp(pSdb) != 0) { + pIter->file = taosOpenFile(pIter->name, TD_FILE_READ); + if (pIter->file == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + mError("failed to open file:%s since %s", pIter->name, terrstr()); + sdbCloseIter(pIter); return -1; } + *ppIter = pIter; + mInfo("sdbiter:%p, is created to read snapshot, file:%s", pIter, pIter->name); return 0; } + +int32_t sdbStopRead(SSdb *pSdb, SSdbIter *pIter) { + sdbCloseIter(pIter); + return 0; +} + +int32_t sdbDoRead(SSdb *pSdb, SSdbIter *pIter, void **ppBuf, int32_t *len) { + int32_t maxlen = 100; + void *pBuf = taosMemoryCalloc(1, maxlen); + if (pBuf == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } + + int32_t readlen = taosReadFile(pIter->file, pBuf, maxlen); + if (readlen < 0 || readlen > maxlen) { + terrno = TAOS_SYSTEM_ERROR(errno); + mError("sdbiter:%p, failed to read snapshot since %s, total:%" PRId64, pIter, terrstr(), pIter->total); + *ppBuf = NULL; + *len = 0; + taosMemoryFree(pBuf); + return -1; + } else if (readlen == 0) { + mInfo("sdbiter:%p, read snapshot to the end, total:%" PRId64, pIter, pIter->total); + *ppBuf = NULL; + *len = 0; + taosMemoryFree(pBuf); + return 0; + } else { // 0 < readlen <= maxlen + pIter->total += readlen; + mInfo("sdbiter:%p, read:%d bytes from snapshot, total:%" PRId64, pIter, readlen, pIter->total); + *ppBuf = pBuf; + *len = readlen; + return 0; + } +} + +int32_t sdbStartWrite(SSdb *pSdb, SSdbIter **ppIter) { + SSdbIter *pIter = sdbCreateIter(pSdb); + if (pIter == NULL) return -1; + + pIter->file = taosOpenFile(pIter->name, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); + if (pIter->file == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + mError("failed to open %s since %s", pIter->name, terrstr()); + sdbCloseIter(pIter); + return -1; + } + + *ppIter = pIter; + mInfo("sdbiter:%p, is created to write snapshot, file:%s", pIter, pIter->name); + return 0; +} + +int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply) { + int32_t code = 0; + + if (!isApply) { + sdbCloseIter(pIter); + mInfo("sdbiter:%p, not apply to sdb", pIter); + return 0; + } + + taosFsyncFile(pIter->file); + taosCloseFile(&pIter->file); + pIter->file = NULL; + + char datafile[PATH_MAX] = {0}; + snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); + if (taosRenameFile(pIter->name, datafile) != 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + mError("sdbiter:%p, failed to rename file %s to %s since %s", pIter, pIter->name, datafile, terrstr()); + sdbCloseIter(pIter); + return -1; + } + + sdbCloseIter(pIter); + if (sdbReadFile(pSdb) != 0) { + mError("sdbiter:%p, failed to read from %s since %s", pIter, datafile, terrstr()); + return -1; 
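/* Reviewer sketch: the end-to-end use of this read/write iterator pair, as
 * exercised by the sdbTest case earlier in this patch -- stream one sdb
 * snapshot into another instance as an opaque byte stream:
 *
 *   SSdbIter *pReader = NULL, *pWriter = NULL;
 *   void     *pBuf = NULL;
 *   int32_t   len = 0;
 *
 *   sdbStartRead(pSdb, &pReader);            // copies sdb.data under filelock
 *   sdbStartWrite(pSdb, &pWriter);           // opens a temp file in tmpDir
 *   while (sdbDoRead(pSdb, pReader, &pBuf, &len) == 0 && pBuf != NULL && len > 0) {
 *     sdbDoWrite(pSdb, pWriter, pBuf, len);  // raw copy, no row decoding
 *     taosMemoryFree(pBuf);
 *   }
 *   sdbStopRead(pSdb, pReader);
 *   sdbStopWrite(pSdb, pWriter, true);       // true: rename over sdb.data, reload
 */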
+ } + + mInfo("sdbiter:%p, successfully applied to sdb", pIter); + return 0; +} + +int32_t sdbDoWrite(SSdb *pSdb, SSdbIter *pIter, void *pBuf, int32_t len) { + int32_t writelen = taosWriteFile(pIter->file, pBuf, len); + if (writelen != len) { + terrno = TAOS_SYSTEM_ERROR(errno); + mError("failed to write len:%d since %s, total:%" PRId64, len, terrstr(), pIter->total); + return -1; + } + + pIter->total += writelen; + mInfo("sdbiter:%p, write:%d bytes to snapshot, total:%" PRId64, pIter, writelen, pIter->total); + return 0; +} \ No newline at end of file diff --git a/source/dnode/mnode/sdb/src/sdbHash.c b/source/dnode/mnode/sdb/src/sdbHash.c index a25c7a5233d79049e22764717e95f95a1f0f3674..abf35b71a91ea368b6d1bbc8e0927be59642ce6d 100644 --- a/source/dnode/mnode/sdb/src/sdbHash.c +++ b/source/dnode/mnode/sdb/src/sdbHash.c @@ -14,7 +14,7 @@ */ #define _DEFAULT_SOURCE -#include "sdbInt.h" +#include "sdb.h" static void sdbCheckRow(SSdb *pSdb, SSdbRow *pRow); diff --git a/source/dnode/mnode/sdb/src/sdbRaw.c b/source/dnode/mnode/sdb/src/sdbRaw.c index fd2f20c242bff4bf96fc1289b3996be9d87462af..90643a54a9de42d4f505fdcb4f1d25ef95b80ac7 100644 --- a/source/dnode/mnode/sdb/src/sdbRaw.c +++ b/source/dnode/mnode/sdb/src/sdbRaw.c @@ -14,7 +14,17 @@ */ #define _DEFAULT_SOURCE -#include "sdbInt.h" +#include "sdb.h" + +int32_t sdbGetIdFromRaw(SSdb *pSdb, SSdbRaw *pRaw) { + EKeyType keytype = pSdb->keyTypes[pRaw->type]; + if (keytype == SDB_KEY_INT32) { + int32_t id = *((int32_t *)(pRaw->pData)); + return id; + } else { + return -2; + } +} SSdbRaw *sdbAllocRaw(ESdbType type, int8_t sver, int32_t dataLen) { SSdbRaw *pRaw = taosMemoryCalloc(1, dataLen + sizeof(SSdbRaw)); diff --git a/source/dnode/mnode/sdb/src/sdbRow.c b/source/dnode/mnode/sdb/src/sdbRow.c index 43f70cb2453358bf115cc44e65d13a5728c9160f..e57a6b028bf9b134c771e2cf82724951a8c87217 100644 --- a/source/dnode/mnode/sdb/src/sdbRow.c +++ b/source/dnode/mnode/sdb/src/sdbRow.c @@ -14,7 +14,7 @@ */ #define _DEFAULT_SOURCE -#include "sdbInt.h" +#include "sdb.h" SSdbRow *sdbAllocRow(int32_t objSize) { SSdbRow *pRow = taosMemoryCalloc(1, objSize + sizeof(SSdbRow)); diff --git a/source/dnode/qnode/src/qnode.c b/source/dnode/qnode/src/qnode.c index 929643fcdf91ef7ba0d6a02b8a07de34f0209d54..40aa572a56709a97e454cdc82cb7e97852356b27 100644 --- a/source/dnode/qnode/src/qnode.c +++ b/source/dnode/qnode/src/qnode.c @@ -40,46 +40,46 @@ void qndClose(SQnode *pQnode) { taosMemoryFree(pQnode); } -int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad) { return 0; } +int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad) { + SMsgCb* pCb = &pQnode->msgCb; -int32_t qndProcessQueryMsg(SQnode *pQnode, SRpcMsg *pMsg) { + pLoad->numOfQueryInQueue = pCb->qsizeFp(pCb->mgmt, pQnode->qndId, QUERY_QUEUE); + pLoad->numOfFetchInQueue = pCb->qsizeFp(pCb->mgmt, pQnode->qndId, FETCH_QUEUE); + pLoad->waitTimeInQueryQUeue = qWorkerGetWaitTimeInQueue(pQnode->pQuery, QUERY_QUEUE); + pLoad->waitTimeInFetchQUeue = qWorkerGetWaitTimeInQueue(pQnode->pQuery, FETCH_QUEUE); + + return 0; +} + +int32_t qndProcessQueryMsg(SQnode *pQnode, int64_t ts, SRpcMsg *pMsg) { int32_t code = -1; SReadHandle handle = {.pMsgCb = &pQnode->msgCb}; qTrace("message in qnode queue is processing"); switch (pMsg->msgType) { case TDMT_VND_QUERY: - code = qWorkerProcessQueryMsg(&handle, pQnode->pQuery, pMsg); + code = qWorkerProcessQueryMsg(&handle, pQnode->pQuery, pMsg, ts); break; case TDMT_VND_QUERY_CONTINUE: - code = qWorkerProcessCQueryMsg(&handle, pQnode->pQuery, pMsg); + code = qWorkerProcessCQueryMsg(&handle, 
pQnode->pQuery, pMsg, ts); break; case TDMT_VND_FETCH: - code = qWorkerProcessFetchMsg(pQnode, pQnode->pQuery, pMsg); + code = qWorkerProcessFetchMsg(pQnode, pQnode->pQuery, pMsg, ts); break; case TDMT_VND_FETCH_RSP: - code = qWorkerProcessFetchRsp(pQnode, pQnode->pQuery, pMsg); - break; - case TDMT_VND_RES_READY: - code = qWorkerProcessReadyMsg(pQnode, pQnode->pQuery, pMsg); - break; - case TDMT_VND_TASKS_STATUS: - code = qWorkerProcessStatusMsg(pQnode, pQnode->pQuery, pMsg); + code = qWorkerProcessFetchRsp(pQnode, pQnode->pQuery, pMsg, ts); break; case TDMT_VND_CANCEL_TASK: - code = qWorkerProcessCancelMsg(pQnode, pQnode->pQuery, pMsg); + code = qWorkerProcessCancelMsg(pQnode, pQnode->pQuery, pMsg, ts); break; case TDMT_VND_DROP_TASK: - code = qWorkerProcessDropMsg(pQnode, pQnode->pQuery, pMsg); + code = qWorkerProcessDropMsg(pQnode, pQnode->pQuery, pMsg, ts); break; - case TDMT_VND_TABLE_META: - // code = vnodeGetTableMeta(pQnode, pMsg); - // break; case TDMT_VND_CONSUME: // code = tqProcessConsumeReq(pQnode->pTq, pMsg); // break; case TDMT_VND_QUERY_HEARTBEAT: - code = qWorkerProcessHbMsg(pQnode, pQnode->pQuery, pMsg); + code = qWorkerProcessHbMsg(pQnode, pQnode->pQuery, pMsg, ts); break; default: qError("unknown msg type:%d in qnode queue", pMsg->msgType); diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c index 7d7c01a870a3b2a07e1fe27ffb56bbc079dfb73b..37b406466df5e229833d8271c81cdfeb28084a80 100644 --- a/source/dnode/snode/src/snode.c +++ b/source/dnode/snode/src/snode.c @@ -57,9 +57,7 @@ void sndMetaDelete(SStreamMeta *pMeta) { } int32_t sndMetaDeployTask(SStreamMeta *pMeta, SStreamTask *pTask) { - for (int i = 0; i < pTask->exec.numOfRunners; i++) { - pTask->exec.runners[i].executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, NULL); - } + pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, NULL); return taosHashPut(pMeta->pHash, &pTask->taskId, sizeof(int32_t), pTask, sizeof(void *)); } @@ -105,8 +103,8 @@ void sndProcessUMsg(SSnode *pSnode, SRpcMsg *pMsg) { tDecoderClear(&decoder); sndMetaDeployTask(pSnode->pMeta, pTask); - } else if (pMsg->msgType == TDMT_SND_TASK_EXEC) { - sndProcessTaskExecReq(pSnode, pMsg); + /*} else if (pMsg->msgType == TDMT_SND_TASK_EXEC) {*/ + /*sndProcessTaskExecReq(pSnode, pMsg);*/ } else { ASSERT(0); } @@ -114,9 +112,9 @@ void sndProcessUMsg(SSnode *pSnode, SRpcMsg *pMsg) { void sndProcessSMsg(SSnode *pSnode, SRpcMsg *pMsg) { // operator exec - if (pMsg->msgType == TDMT_SND_TASK_EXEC) { - sndProcessTaskExecReq(pSnode, pMsg); - } else { - ASSERT(0); - } + /*if (pMsg->msgType == TDMT_SND_TASK_EXEC) {*/ + /*sndProcessTaskExecReq(pSnode, pMsg);*/ + /*} else {*/ + ASSERT(0); + /*}*/ } diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index a8e3860ed14035d5bedd5e1b096c45e1f03ec6e3..d988f97188b9330e1229368554b0f75a5713025b 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -13,6 +13,8 @@ target_sources( "src/vnd/vnodeModule.c" "src/vnd/vnodeSvr.c" "src/vnd/vnodeSync.c" + "src/vnd/vnodeSnapshot.c" + "src/vnd/vnodeUtil.c" # meta "src/meta/metaOpen.c" @@ -22,6 +24,7 @@ target_sources( "src/meta/metaQuery.c" "src/meta/metaCommit.c" "src/meta/metaEntry.c" + "src/meta/metaSnapshot.c" # sma "src/sma/sma.c" @@ -44,11 +47,11 @@ target_sources( "src/tsdb/tsdbReadImpl.c" # "src/tsdb/tsdbSma.c" "src/tsdb/tsdbWrite.c" + "src/tsdb/tsdbSnapshot.c" # tq "src/tq/tq.c" "src/tq/tqCommit.c" - "src/tq/tqMetaStore.c" "src/tq/tqOffset.c" "src/tq/tqPush.c" 
"src/tq/tqRead.c" @@ -76,9 +79,14 @@ target_link_libraries( #PUBLIC scalar PUBLIC transport PUBLIC stream + PUBLIC index ) target_compile_definitions(vnode PUBLIC -DMETA_REFACT) - +if (${BUILD_WITH_INVERTEDINDEX}) + add_definitions(-DUSE_INVERTED_INDEX) +endif(${BUILD_WITH_INVERTEDINDEX}) if(${BUILD_TEST}) add_subdirectory(test) endif(${BUILD_TEST}) + + diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index db992f85d4cd37ffea4f26481b1c82eae65f3f3d..e4343e3bbf63a9dd847cc1bd2f79e2ef35721cd3 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -39,9 +39,10 @@ extern "C" { #endif // vnode -typedef struct SVnode SVnode; -typedef struct STsdbCfg STsdbCfg; // todo: remove -typedef struct SVnodeCfg SVnodeCfg; +typedef struct SVnode SVnode; +typedef struct STsdbCfg STsdbCfg; // todo: remove +typedef struct SVnodeCfg SVnodeCfg; +typedef struct SVSnapshotReader SVSnapshotReader; extern const SVnodeCfg vnodeCfgDefault; @@ -51,7 +52,7 @@ int32_t vnodeCreate(const char *path, SVnodeCfg *pCfg, STfs *pTfs); void vnodeDestroy(const char *path, STfs *pTfs); SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb); void vnodeClose(SVnode *pVnode); -int32_t vnodePreprocessWriteReqs(SVnode *pVnode, SArray *pMsgs, int64_t *version); +int32_t vnodePreprocessReq(SVnode *pVnode, SRpcMsg *pMsg); int32_t vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg *pRsp); int32_t vnodeProcessCMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp); int32_t vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp); @@ -59,13 +60,14 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg); int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo); int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad); int32_t vnodeValidateTableHash(SVnode *pVnode, char *tableFName); - int32_t vnodeStart(SVnode *pVnode); void vnodeStop(SVnode *pVnode); - int64_t vnodeGetSyncHandle(SVnode *pVnode); void vnodeGetSnapshot(SVnode *pVnode, SSnapshot *pSnapshot); void vnodeGetInfo(SVnode *pVnode, const char **dbname, int32_t *vgId); +int32_t vnodeSnapshotReaderOpen(SVnode *pVnode, SVSnapshotReader **ppReader, int64_t sver, int64_t ever); +int32_t vnodeSnapshotReaderClose(SVSnapshotReader *pReader); +int32_t vnodeSnapshotRead(SVSnapshotReader *pReader, const void **ppData, uint32_t *nData); // meta typedef struct SMeta SMeta; // todo: remove @@ -97,24 +99,21 @@ typedef void *tsdbReaderT; #define BLOCK_LOAD_TABLE_SEQ_ORDER 2 #define BLOCK_LOAD_TABLE_RR_ORDER 3 -tsdbReaderT *tsdbQueryTables(SVnode *pVnode, SQueryTableDataCond *pCond, STableGroupInfo *tableInfoGroup, uint64_t qId, +tsdbReaderT *tsdbQueryTables(SVnode *pVnode, SQueryTableDataCond *pCond, STableListInfo *tableInfoGroup, uint64_t qId, uint64_t taskId); -tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STableGroupInfo *groupList, uint64_t qId, +tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STableListInfo *groupList, uint64_t qId, void *pMemRef); int32_t tsdbGetFileBlocksDistInfo(tsdbReaderT *pReader, STableBlockDistInfo *pTableBlockInfo); bool isTsdbCacheLastRow(tsdbReaderT *pReader); -int32_t tsdbQuerySTableByTagCond(void *pMeta, uint64_t uid, TSKEY skey, const char *pTagCond, size_t len, - int16_t tagNameRelType, const char *tbnameCond, STableGroupInfo *pGroupInfo, - SColIndex *pColIndex, int32_t numOfCols, uint64_t reqId, uint64_t taskId); +int32_t tsdbGetAllTableList(SMeta *pMeta, uint64_t uid, SArray *list); 
+void * tsdbGetIdx(SMeta *pMeta); int64_t tsdbGetNumOfRowsInMemTable(tsdbReaderT *pHandle); -bool tsdbNextDataBlock(tsdbReaderT pTsdbReadHandle); -void tsdbRetrieveDataBlockInfo(tsdbReaderT *pTsdbReadHandle, SDataBlockInfo *pBlockInfo); + +bool tsdbNextDataBlock(tsdbReaderT pTsdbReadHandle); +void tsdbRetrieveDataBlockInfo(tsdbReaderT *pTsdbReadHandle, SDataBlockInfo *pBlockInfo); int32_t tsdbRetrieveDataBlockStatisInfo(tsdbReaderT *pTsdbReadHandle, SColumnDataAgg ***pBlockStatis, bool *allHave); SArray *tsdbRetrieveDataBlock(tsdbReaderT *pTsdbReadHandle, SArray *pColumnIdList); -void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond *pCond); -void tsdbDestroyTableGroup(STableGroupInfo *pGroupList); -int32_t tsdbGetOneTableGroup(void *pMeta, uint64_t uid, TSKEY startKey, STableGroupInfo *pGroupInfo); -int32_t tsdbGetTableGroupFromIdList(SVnode *pVnode, SArray *pTableIdList, STableGroupInfo *pGroupInfo); +void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond *pCond, int32_t tWinIdx); void tsdbCleanupReadHandle(tsdbReaderT queryHandle); // tq @@ -126,7 +125,7 @@ STqReadHandle *tqInitSubmitMsgScanner(SMeta *pMeta); void tqReadHandleSetColIdList(STqReadHandle *pReadHandle, SArray *pColIdList); int32_t tqReadHandleSetTbUidList(STqReadHandle *pHandle, const SArray *tbUidList); int32_t tqReadHandleAddTbUidList(STqReadHandle *pHandle, const SArray *tbUidList); -int32_t tqReadHandleRemoveTbUidList(STqReadHandle* pHandle, const SArray* tbUidList); +int32_t tqReadHandleRemoveTbUidList(STqReadHandle *pHandle, const SArray *tbUidList); int32_t tqReadHandleSetMsg(STqReadHandle *pHandle, SSubmitReq *pMsg, int64_t ver); bool tqNextDataBlock(STqReadHandle *pHandle); @@ -174,26 +173,26 @@ typedef struct { } STableKeyInfo; struct SMetaEntry { - int64_t version; - int8_t type; - tb_uid_t uid; - const char *name; + int64_t version; + int8_t type; + tb_uid_t uid; + char * name; union { struct { - SSchemaWrapper schema; + SSchemaWrapper schemaRow; SSchemaWrapper schemaTag; } stbEntry; struct { - int64_t ctime; - int32_t ttlDays; - tb_uid_t suid; - const uint8_t *pTags; + int64_t ctime; + int32_t ttlDays; + tb_uid_t suid; + uint8_t *pTags; } ctbEntry; struct { int64_t ctime; int32_t ttlDays; int32_t ncid; // next column id - SSchemaWrapper schema; + SSchemaWrapper schemaRow; } ntbEntry; struct { STSma *tsma; @@ -205,17 +204,17 @@ struct SMetaEntry { struct SMetaReader { int32_t flags; - SMeta *pMeta; + SMeta * pMeta; SDecoder coder; SMetaEntry me; - void *pBuf; + void * pBuf; int32_t szBuf; }; struct SMTbCursor { - TBC *pDbc; - void *pKey; - void *pVal; + TBC * pDbc; + void * pKey; + void * pVal; int32_t kLen; int32_t vLen; SMetaReader mr; diff --git a/source/dnode/vnode/src/inc/meta.h b/source/dnode/vnode/src/inc/meta.h index 693f4a0a2b4e0223f956990c59316b696b8a1c9a..3340bbb91ce0f8ed29b2ef48fc325472676b56e1 100644 --- a/source/dnode/vnode/src/inc/meta.h +++ b/source/dnode/vnode/src/inc/meta.h @@ -17,6 +17,7 @@ #define _TD_VNODE_META_H_ #include "vnodeInt.h" +#include "index.h" #ifdef __cplusplus extern "C" { @@ -61,16 +62,20 @@ static FORCE_INLINE tb_uid_t metaGenerateUid(SMeta* pMeta) { return tGenIdPI64() struct SMeta { TdThreadRwlock lock; - char* path; - SVnode* pVnode; - TDB* pEnv; - TXN txn; - TTB* pTbDb; - TTB* pSkmDb; - TTB* pUidIdx; - TTB* pNameIdx; - TTB* pCtbIdx; - TTB* pTagIdx; + char* path; + SVnode* pVnode; + TDB* pEnv; + TXN txn; + TTB* pTbDb; + TTB* pSkmDb; + TTB* pUidIdx; + TTB* pNameIdx; + TTB* pCtbIdx; +#ifdef USE_INVERTED_INDEX + void* pTagIvtIdx; +#else + TTB* 
pTagIdx; +#endif TTB* pTtlIdx; TTB* pSmaIdx; SMetaIdx* pIdx; diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index a8a3e4f601ed40bc3d7b2e618da08d1145c84a53..06ff6329e0b3ddc69cc50ec1becc9541e3939ca5 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -20,9 +20,9 @@ #include "executor.h" #include "os.h" -#include "tcache.h" #include "thash.h" #include "tmsg.h" +#include "tqueue.h" #include "trpc.h" #include "ttimer.h" #include "wal.h" @@ -41,45 +41,6 @@ extern "C" { #define tqTrace(...) do { if (tqDebugFlag & DEBUG_TRACE) { taosPrintLog("TQ ", DEBUG_TRACE, tqDebugFlag, __VA_ARGS__); }} while(0) // clang-format on -#define TQ_BUFFER_SIZE 4 - -#define TQ_BUCKET_MASK 0xFF -#define TQ_BUCKET_SIZE 256 - -#define TQ_PAGE_SIZE 4096 -// key + offset + size -#define TQ_IDX_SIZE 24 -// 4096 / 24 -#define TQ_MAX_IDX_ONE_PAGE 170 -// 24 * 170 -#define TQ_IDX_PAGE_BODY_SIZE 4080 -// 4096 - 4080 -#define TQ_IDX_PAGE_HEAD_SIZE 16 - -#define TQ_ACTION_CONST 0 -#define TQ_ACTION_INUSE 1 -#define TQ_ACTION_INUSE_CONT 2 -#define TQ_ACTION_INTXN 3 - -#define TQ_SVER 0 - -// TODO: inplace mode is not implemented -#define TQ_UPDATE_INPLACE 0 -#define TQ_UPDATE_APPEND 1 - -#define TQ_DUP_INTXN_REWRITE 0 -#define TQ_DUP_INTXN_REJECT 2 - -static inline bool tqUpdateAppend(int32_t tqConfigFlag) { return tqConfigFlag & TQ_UPDATE_APPEND; } - -static inline bool tqDupIntxnReject(int32_t tqConfigFlag) { return tqConfigFlag & TQ_DUP_INTXN_REJECT; } - -static const int8_t TQ_CONST_DELETE = TQ_ACTION_CONST; - -#define TQ_DELETE_TOKEN (void*)&TQ_CONST_DELETE - -typedef enum { TQ_ITEM_READY, TQ_ITEM_PROCESS, TQ_ITEM_EMPTY } STqItemStatus; - typedef struct STqOffsetCfg STqOffsetCfg; typedef struct STqOffsetStore STqOffsetStore; @@ -98,53 +59,6 @@ struct STqReadHandle { STSchema* pSchema; }; -typedef struct { - int16_t ver; - int16_t action; - int32_t checksum; - int64_t ssize; - char content[]; -} STqSerializedHead; - -typedef int32_t (*FTqSerialize)(const void* pObj, STqSerializedHead** ppHead); -typedef int32_t (*FTqDeserialize)(void* self, const STqSerializedHead* pHead, void** ppObj); -typedef void (*FTqDelete)(void*); - -typedef struct { - int64_t key; - int64_t offset; - int64_t serializedSize; - void* valueInUse; - void* valueInTxn; -} STqMetaHandle; - -typedef struct STqMetaList { - STqMetaHandle handle; - struct STqMetaList* next; - // struct STqMetaList* inTxnPrev; - // struct STqMetaList* inTxnNext; - struct STqMetaList* unpersistPrev; - struct STqMetaList* unpersistNext; -} STqMetaList; - -typedef struct { - STQ* pTq; - STqMetaList* bucket[TQ_BUCKET_SIZE]; - // a table head - STqMetaList* unpersistHead; - // topics that are not connectted - STqMetaList* unconnectTopic; - - TdFilePtr pFile; - TdFilePtr pIdxFile; - - char* dirPath; - int32_t tqConfigFlag; - FTqSerialize pSerializer; - FTqDeserialize pDeserializer; - FTqDelete pDeleter; -} STqMetaStore; - typedef struct { int64_t consumerId; int32_t epoch; @@ -172,15 +86,18 @@ typedef struct { qTaskInfo_t task[5]; } STqExec; +int32_t tEncodeSTqExec(SEncoder* pEncoder, const STqExec* pExec); +int32_t tDecodeSTqExec(SDecoder* pDecoder, STqExec* pExec); + struct STQ { - char* path; - // STqMetaStore* tqMeta; + char* path; SHashObj* pushMgr; // consumerId -> STqExec* SHashObj* execs; // subKey -> STqExec SHashObj* pStreamTasks; SVnode* pVnode; SWal* pWal; - // TDB* pTdb; + TDB* pMetaStore; + TTB* pExecStore; }; typedef struct { @@ -188,89 +105,12 @@ typedef struct { tmr_h timer; } STqMgmt; -static STqMgmt 
tqMgmt; - -typedef struct { - int8_t status; - int64_t offset; - qTaskInfo_t task; - STqReadHandle* pReadHandle; -} STqTaskItem; - -// new version -typedef struct { - int64_t firstOffset; - int64_t lastOffset; - STqTaskItem output[TQ_BUFFER_SIZE]; -} STqBuffer; - -typedef struct { - char topicName[TSDB_TOPIC_FNAME_LEN]; - char* sql; - char* logicalPlan; - char* physicalPlan; - char* qmsg; - STqBuffer buffer; - SWalReadHandle* pReadhandle; -} STqTopic; - -typedef struct { - int64_t consumerId; - int32_t epoch; - char cgroup[TSDB_TOPIC_FNAME_LEN]; - SArray* topics; // SArray -} STqConsumer; - -typedef struct { - int8_t type; - int8_t nodeType; - int8_t reserved[6]; - int64_t streamId; - qTaskInfo_t task; - // TODO sync function -} STqStreamPusher; - -typedef struct { - int8_t inited; - tmr_h timer; -} STqPushMgmt; - -static STqPushMgmt tqPushMgmt; +static STqMgmt tqMgmt = {0}; // init once int tqInit(); void tqCleanUp(); -// open in each vnode -// required by vnode - -int32_t tqSerializeConsumer(const STqConsumer*, STqSerializedHead**); -int32_t tqDeserializeConsumer(STQ*, const STqSerializedHead*, STqConsumer**); - -static int FORCE_INLINE tqQueryExecuting(int32_t status) { return status; } - -// tqMetaStore.h -STqMetaStore* tqStoreOpen(STQ* pTq, const char* path, FTqSerialize pSerializer, FTqDeserialize pDeserializer, - FTqDelete pDeleter, int32_t tqConfigFlag); -int32_t tqStoreClose(STqMetaStore*); -// int32_t tqStoreDelete(TqMetaStore*); -// int32_t tqStoreCommitAll(TqMetaStore*); -int32_t tqStorePersist(STqMetaStore*); -// clean deleted idx and data from persistent file -int32_t tqStoreCompact(STqMetaStore*); - -void* tqHandleGet(STqMetaStore*, int64_t key); -// make it unpersist -void* tqHandleTouchGet(STqMetaStore*, int64_t key); -int32_t tqHandleMovePut(STqMetaStore*, int64_t key, void* value); -int32_t tqHandleCopyPut(STqMetaStore*, int64_t key, void* value, size_t vsize); -// delete committed kv pair -// notice that a delete action still needs to be committed -int32_t tqHandleDel(STqMetaStore*, int64_t key); -int32_t tqHandlePurge(STqMetaStore*, int64_t key); -int32_t tqHandleCommit(STqMetaStore*, int64_t key); -int32_t tqHandleAbort(STqMetaStore*, int64_t key); - // tqOffset STqOffsetStore* STqOffsetOpen(STqOffsetCfg*); void STqOffsetClose(STqOffsetStore*); diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index 1195f9e2b397c00e4d02ead5db574d2d8252f1f9..2e4ff6a4abd8315afa06e9a881955947af9144c6 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -40,8 +40,8 @@ typedef struct STable STable; int tsdbMemTableCreate(STsdb *pTsdb, STsdbMemTable **ppMemTable); void tsdbMemTableDestroy(STsdb *pTsdb, STsdbMemTable *pMemTable); -int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey, int maxRowsToRead, SDataCols *pCols, - TKEY *filterKeys, int nFilterKeys, bool keepDup, SMergeInfo *pMergeInfo); +int tsdbLoadDataFromCache(STsdb *pTsdb, STable *pTable, SSkipListIterator *pIter, TSKEY maxKey, int maxRowsToRead, + SDataCols *pCols, TKEY *filterKeys, int nFilterKeys, bool keepDup, SMergeInfo *pMergeInfo); // tsdbCommit ================ @@ -79,13 +79,14 @@ struct STsdb { struct STable { uint64_t tid; uint64_t uid; - STSchema *pSchema; + STSchema *pSchema; // latest schema + STSchema *pCacheSchema; // cached cache }; #define TABLE_TID(t) (t)->tid #define TABLE_UID(t) (t)->uid -int tsdbPrepareCommit(STsdb *pTsdb); +int tsdbPrepareCommit(STsdb *pTsdb); typedef enum { TSDB_FILE_HEAD = 0, // .head 
TSDB_FILE_DATA, // .data @@ -179,8 +180,17 @@ struct STsdbFS { int tsdbLockRepo(STsdb *pTsdb); int tsdbUnlockRepo(STsdb *pTsdb); -static FORCE_INLINE STSchema *tsdbGetTableSchemaImpl(STable *pTable, bool lock, bool copy, int32_t version) { - return pTable->pSchema; +static FORCE_INLINE STSchema *tsdbGetTableSchemaImpl(STsdb *pTsdb, STable *pTable, bool lock, bool copy, + int32_t version) { + if ((version < 0) || (schemaVersion(pTable->pSchema) == version)) { + return pTable->pSchema; + } + + if (!pTable->pCacheSchema || (schemaVersion(pTable->pCacheSchema) != version)) { + taosMemoryFreeClear(pTable->pCacheSchema); + pTable->pCacheSchema = metaGetTbTSchema(REPO_META(pTsdb), pTable->uid, version); + } + return pTable->pCacheSchema; } // tsdbMemTable.h diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 23825e6f4a1085f3104414110814853a319c98e8..0e67d9e426f1b708e927d986f7c9d797acc8759d 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -47,15 +47,17 @@ extern "C" { #endif -typedef struct SVnodeInfo SVnodeInfo; -typedef struct SMeta SMeta; -typedef struct SSma SSma; -typedef struct STsdb STsdb; -typedef struct STQ STQ; -typedef struct SVState SVState; -typedef struct SVBufPool SVBufPool; -typedef struct SQWorker SQHandle; -typedef struct STsdbKeepCfg STsdbKeepCfg; +typedef struct SVnodeInfo SVnodeInfo; +typedef struct SMeta SMeta; +typedef struct SSma SSma; +typedef struct STsdb STsdb; +typedef struct STQ STQ; +typedef struct SVState SVState; +typedef struct SVBufPool SVBufPool; +typedef struct SQWorker SQHandle; +typedef struct STsdbKeepCfg STsdbKeepCfg; +typedef struct SMetaSnapshotReader SMetaSnapshotReader; +typedef struct STsdbSnapshotReader STsdbSnapshotReader; #define VNODE_META_DIR "meta" #define VNODE_TSDB_DIR "tsdb" @@ -67,8 +69,10 @@ typedef struct STsdbKeepCfg STsdbKeepCfg; #define VNODE_RSMA2_DIR "rsma2" // vnd.h -void* vnodeBufPoolMalloc(SVBufPool* pPool, int size); -void vnodeBufPoolFree(SVBufPool* pPool, void* p); +void* vnodeBufPoolMalloc(SVBufPool* pPool, int size); +void vnodeBufPoolFree(SVBufPool* pPool, void* p); +int32_t vnodeRealloc(void** pp, int32_t size); +void vnodeFree(void* p); // meta typedef struct SMCtbCursor SMCtbCursor; @@ -87,6 +91,7 @@ int metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, bool isinline); STSchema* metaGetTbTSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver); int metaGetTableEntryByName(SMetaReader* pReader, const char* name); +tb_uid_t metaGetTableEntryUidByName(SMeta* pMeta, const char* name); int metaGetTbNum(SMeta* pMeta); SMCtbCursor* metaOpenCtbCursor(SMeta* pMeta, tb_uid_t uid); void metaCloseCtbCursor(SMCtbCursor* pCtbCur); @@ -95,6 +100,10 @@ STSma* metaGetSmaInfoByIndex(SMeta* pMeta, int64_t indexUid); STSmaWrapper* metaGetSmaInfoByTable(SMeta* pMeta, tb_uid_t uid, bool deepCopy); SArray* metaGetSmaIdsByTable(SMeta* pMeta, tb_uid_t uid); SArray* metaGetSmaTbUids(SMeta* pMeta); +int32_t metaSnapshotReaderOpen(SMeta* pMeta, SMetaSnapshotReader** ppReader, int64_t sver, int64_t ever); +int32_t metaSnapshotReaderClose(SMetaSnapshotReader* pReader); +int32_t metaSnapshotRead(SMetaSnapshotReader* pReader, void** ppData, uint32_t* nData); +void* metaGetIdx(SMeta* pMeta); int32_t metaCreateTSma(SMeta* pMeta, int64_t version, SSmaCfg* pCfg); int32_t metaDropTSma(SMeta* pMeta, int64_t indexUid); @@ -104,14 +113,16 @@ int tsdbOpen(SVnode* pVnode, STsdb** ppTsdb, 
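The rewritten tsdbGetTableSchemaImpl above implements a one-slot, version-keyed cache: the latest schema is always answered from pSchema (the "// cached cache" comment on pCacheSchema presumably means "cached schema"), and any older version comes from a single cached slot that is evicted and refilled on mismatch. A standalone sketch of that policy, with fetchSchema() standing in for metaGetTbTSchema(); names are illustrative:

#include <stdlib.h>

typedef struct { int version; /* ...columns... */ } STSchemaLike;

typedef struct {
  STSchemaLike *pSchema;        /* latest schema, always resident */
  STSchemaLike *pCacheSchema;   /* last non-latest version requested */
} STableLike;

static STSchemaLike *fetchSchema(int version) {   /* metaGetTbTSchema stand-in */
  STSchemaLike *s = malloc(sizeof(*s));
  if (s != NULL) s->version = version;
  return s;
}

STSchemaLike *getSchema(STableLike *t, int version) {
  if (version < 0 || t->pSchema->version == version) {
    return t->pSchema;                            /* fast path: latest */
  }
  if (t->pCacheSchema == NULL || t->pCacheSchema->version != version) {
    free(t->pCacheSchema);                        /* evict the single slot */
    t->pCacheSchema = fetchSchema(version);       /* refill on miss */
  }
  return t->pCacheSchema;
}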
const char* dir, STsdbKeep int tsdbClose(STsdb** pTsdb); int tsdbBegin(STsdb* pTsdb); int tsdbCommit(STsdb* pTsdb); -int tsdbScanAndConvertSubmitMsg(STsdb* pTsdb, const SSubmitReq* pMsg); +int tsdbScanAndConvertSubmitMsg(STsdb* pTsdb, SSubmitReq* pMsg); int tsdbInsertData(STsdb* pTsdb, int64_t version, SSubmitReq* pMsg, SSubmitRsp* pRsp); int tsdbInsertTableData(STsdb* pTsdb, SSubmitMsgIter* pMsgIter, SSubmitBlk* pBlock, SSubmitBlkRsp* pRsp); -tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList, uint64_t qId, +tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableListInfo* tableList, uint64_t qId, uint64_t taskId); -tsdbReaderT tsdbQueryCacheLastT(STsdb* tsdb, SQueryTableDataCond* pCond, STableGroupInfo* groupList, uint64_t qId, +tsdbReaderT tsdbQueryCacheLastT(STsdb* tsdb, SQueryTableDataCond* pCond, STableListInfo* tableList, uint64_t qId, void* pMemRef); -int32_t tsdbGetTableGroupFromIdListT(STsdb* tsdb, SArray* pTableIdList, STableGroupInfo* pGroupInfo); +int32_t tsdbSnapshotReaderOpen(STsdb* pTsdb, STsdbSnapshotReader** ppReader, int64_t sver, int64_t ever); +int32_t tsdbSnapshotReaderClose(STsdbSnapshotReader* pReader); +int32_t tsdbSnapshotRead(STsdbSnapshotReader* pReader, void** ppData, uint32_t* nData); // tq STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal); @@ -123,11 +134,7 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen); int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen); int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId); int32_t tqProcessTaskDeploy(STQ* pTq, char* msg, int32_t msgLen); -#if 0 -int32_t tqProcessTaskExec(STQ* pTq, char* msg, int32_t msgLen, int32_t workerId); -int32_t tqProcessStreamTrigger(STQ* pTq, void* data, int32_t dataLen, int32_t workerId); -#endif -int32_t tqProcessStreamTriggerNew(STQ* pTq, SSubmitReq* data); +int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* data); int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg); int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg); int32_t tqProcessTaskRecoverReq(STQ* pTq, SRpcMsg* pMsg); diff --git a/source/dnode/vnode/src/meta/metaEntry.c b/source/dnode/vnode/src/meta/metaEntry.c index 8a4db3100d31695af2889367088c9a0e16bb6236..be2ddfc32f83fcf0d6b5500fb21cdec632c27aa8 100644 --- a/source/dnode/vnode/src/meta/metaEntry.c +++ b/source/dnode/vnode/src/meta/metaEntry.c @@ -24,7 +24,7 @@ int metaEncodeEntry(SEncoder *pCoder, const SMetaEntry *pME) { if (tEncodeCStr(pCoder, pME->name) < 0) return -1; if (pME->type == TSDB_SUPER_TABLE) { - if (tEncodeSSchemaWrapper(pCoder, &pME->stbEntry.schema) < 0) return -1; + if (tEncodeSSchemaWrapper(pCoder, &pME->stbEntry.schemaRow) < 0) return -1; if (tEncodeSSchemaWrapper(pCoder, &pME->stbEntry.schemaTag) < 0) return -1; } else if (pME->type == TSDB_CHILD_TABLE) { if (tEncodeI64(pCoder, pME->ctbEntry.ctime) < 0) return -1; @@ -35,7 +35,7 @@ int metaEncodeEntry(SEncoder *pCoder, const SMetaEntry *pME) { if (tEncodeI64(pCoder, pME->ntbEntry.ctime) < 0) return -1; if (tEncodeI32(pCoder, pME->ntbEntry.ttlDays) < 0) return -1; if (tEncodeI32v(pCoder, pME->ntbEntry.ncid) < 0) return -1; - if (tEncodeSSchemaWrapper(pCoder, &pME->ntbEntry.schema) < 0) return -1; + if (tEncodeSSchemaWrapper(pCoder, &pME->ntbEntry.schemaRow) < 0) return -1; } else if (pME->type == TSDB_TSMA_TABLE) { if (tEncodeTSma(pCoder, pME->smaEntry.tsma) < 0) return -1; } else { @@ -56,7 +56,7 @@ int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) { if 
(tDecodeCStr(pCoder, &pME->name) < 0) return -1; if (pME->type == TSDB_SUPER_TABLE) { - if (tDecodeSSchemaWrapperEx(pCoder, &pME->stbEntry.schema) < 0) return -1; + if (tDecodeSSchemaWrapperEx(pCoder, &pME->stbEntry.schemaRow) < 0) return -1; if (tDecodeSSchemaWrapperEx(pCoder, &pME->stbEntry.schemaTag) < 0) return -1; } else if (pME->type == TSDB_CHILD_TABLE) { if (tDecodeI64(pCoder, &pME->ctbEntry.ctime) < 0) return -1; @@ -67,7 +67,7 @@ int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) { if (tDecodeI64(pCoder, &pME->ntbEntry.ctime) < 0) return -1; if (tDecodeI32(pCoder, &pME->ntbEntry.ttlDays) < 0) return -1; if (tDecodeI32v(pCoder, &pME->ntbEntry.ncid) < 0) return -1; - if (tDecodeSSchemaWrapperEx(pCoder, &pME->ntbEntry.schema) < 0) return -1; + if (tDecodeSSchemaWrapperEx(pCoder, &pME->ntbEntry.schemaRow) < 0) return -1; } else if (pME->type == TSDB_TSMA_TABLE) { pME->smaEntry.tsma = tDecoderMalloc(pCoder, sizeof(STSma)); if (!pME->smaEntry.tsma) { diff --git a/source/dnode/vnode/src/meta/metaIdx.c b/source/dnode/vnode/src/meta/metaIdx.c index 3f52071315bf15ca15beb2a14105c757d1b2eb25..efa06d2d1fea29995522e95b49789c6df3c2c435 100644 --- a/source/dnode/vnode/src/meta/metaIdx.c +++ b/source/dnode/vnode/src/meta/metaIdx.c @@ -53,10 +53,10 @@ int metaOpenIdx(SMeta *pMeta) { #endif #ifdef USE_INVERTED_INDEX - SIndexOpts opts; - if (indexOpen(&opts, pMeta->path, &pMeta->pIdx->pIdx) != 0) { - return -1; - } + // SIndexOpts opts; + // if (indexOpen(&opts, pMeta->path, &pMeta->pIdx->pIdx) != 0) { + // return -1; + //} #endif return 0; @@ -71,36 +71,37 @@ void metaCloseIdx(SMeta *pMeta) { /* TODO */ #endif #ifdef USE_INVERTED_INDEX - SIndexOpts opts; - if (indexClose(pMeta->pIdx->pIdx) != 0) { - return -1; - } + // SIndexOpts opts; + // if (indexClose(pMeta->pIdx->pIdx) != 0) { + // return -1; + //} + // return 0; #endif } int metaSaveTableToIdx(SMeta *pMeta, const STbCfg *pTbCfg) { #ifdef USE_INVERTED_INDEX - if (pTbCfgs->type == META_CHILD_TABLE) { - char buf[8] = {0}; - int16_t colId = (kvRowColIdx(pTbCfg->ctbCfg.pTag))[0].colId; - sprintf(buf, "%d", colId); // colname - - char *pTagVal = (char *)tdGetKVRowValOfCol(pTbCfg->ctbCfg.pTag, (kvRowColIdx(pTbCfg->ctbCfg.pTag))[0].colId); - - tb_uid_t suid = pTbCfg->ctbCfg.suid; // super id - tb_uid_t tuid = 0; // child table uid - SIndexMultiTerm *terms = indexMultiTermCreate(); - SIndexTerm *term = - indexTermCreate(suid, ADD_VALUE, TSDB_DATA_TYPE_BINARY, buf, strlen(buf), pTagVal, strlen(pTagVal), tuid); - indexMultiTermAdd(terms, term); - - int ret = indexPut(pMeta->pIdx->pIdx, terms); - indexMultiTermDestroy(terms); - return ret; - } else { - return DB_DONOTINDEX; - } + // if (pTbCfgs->type == META_CHILD_TABLE) { + // char buf[8] = {0}; + // int16_t colId = (kvRowColIdx(pTbCfg->ctbCfg.pTag))[0].colId; + // sprintf(buf, "%d", colId); // colname + + // char *pTagVal = (char *)tdGetKVRowValOfCol(pTbCfg->ctbCfg.pTag, (kvRowColIdx(pTbCfg->ctbCfg.pTag))[0].colId); + + // tb_uid_t suid = pTbCfg->ctbCfg.suid; // super id + // tb_uid_t tuid = 0; // child table uid + // SIndexMultiTerm *terms = indexMultiTermCreate(); + // SIndexTerm *term = + // indexTermCreate(suid, ADD_VALUE, TSDB_DATA_TYPE_BINARY, buf, strlen(buf), pTagVal, strlen(pTagVal), tuid); + // indexMultiTermAdd(terms, term); + + // int ret = indexPut(pMeta->pIdx->pIdx, terms); + // indexMultiTermDestroy(terms); + // return ret; + //} else { + // return DB_DONOTINDEX; + //} #endif // TODO return 0; @@ -112,4 +113,4 @@ int metaRemoveTableFromIdx(SMeta *pMeta, tb_uid_t uid) { #endif // TODO 
return 0; -} \ No newline at end of file +} diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index 9a97357b97ac8e637d7a8cf72139b6615e7fdb05..f23e7f88056d6a397a5979bda11dd4f080ba0212 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -93,11 +93,24 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta) { } // open pTagIdx +#ifdef USE_INVERTED_INDEX + // TODO(yihaoDeng), refactor later + char indexFullPath[128] = {0}; + sprintf(indexFullPath, "%s/%s", pMeta->path, "invert"); + taosMkDir(indexFullPath); + ret = indexOpen(indexOptsCreate(), indexFullPath, (SIndex **)&pMeta->pTagIvtIdx); + if (ret < 0) { + metaError("vgId:%d failed to open meta tag index since %s", TD_VID(pVnode), tstrerror(terrno)); + goto _err; + } + +#else ret = tdbTbOpen("tag.idx", -1, 0, tagIdxKeyCmpr, pMeta->pEnv, &pMeta->pTagIdx); if (ret < 0) { metaError("vgId:%d failed to open meta tag index since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } +#endif // open pTtlIdx ret = tdbTbOpen("ttl.idx", sizeof(STtlIdxKey), 0, ttlIdxKeyCmpr, pMeta->pEnv, &pMeta->pTtlIdx); @@ -128,7 +141,11 @@ _err: if (pMeta->pIdx) metaCloseIdx(pMeta); if (pMeta->pSmaIdx) tdbTbClose(pMeta->pSmaIdx); if (pMeta->pTtlIdx) tdbTbClose(pMeta->pTtlIdx); +#ifdef USE_INVERTED_INDEX + if (pMeta->pTagIvtIdx) indexClose(pMeta->pTagIvtIdx); +#else if (pMeta->pTagIdx) tdbTbClose(pMeta->pTagIdx); +#endif if (pMeta->pCtbIdx) tdbTbClose(pMeta->pCtbIdx); if (pMeta->pNameIdx) tdbTbClose(pMeta->pNameIdx); if (pMeta->pUidIdx) tdbTbClose(pMeta->pUidIdx); @@ -145,7 +162,11 @@ int metaClose(SMeta *pMeta) { if (pMeta->pIdx) metaCloseIdx(pMeta); if (pMeta->pSmaIdx) tdbTbClose(pMeta->pSmaIdx); if (pMeta->pTtlIdx) tdbTbClose(pMeta->pTtlIdx); +#ifdef USE_INVERTED_INDEX + if (pMeta->pTagIvtIdx) indexClose(pMeta->pTagIvtIdx); +#else if (pMeta->pTagIdx) tdbTbClose(pMeta->pTagIdx); +#endif if (pMeta->pCtbIdx) tdbTbClose(pMeta->pCtbIdx); if (pMeta->pNameIdx) tdbTbClose(pMeta->pNameIdx); if (pMeta->pUidIdx) tdbTbClose(pMeta->pUidIdx); diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 2bcb68c82a4dc9b4eabbe05e84d754fec860ea72..921c8a90a83dbd594cf6c2b0666b3bc87a9022f6 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -81,6 +81,19 @@ int metaGetTableEntryByName(SMetaReader *pReader, const char *name) { return metaGetTableEntryByUid(pReader, uid); } +tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name) { + void *pData = NULL; + int nData = 0; + tb_uid_t uid = 0; + + if (tdbTbGet(pMeta->pNameIdx, name, strlen(name) + 1, &pData, &nData) == 0) { + uid = *(tb_uid_t *)pData; + tdbFree(pData); + } + + return 0; +} + int metaReadNext(SMetaReader *pReader) { SMeta *pMeta = pReader->pMeta; @@ -142,44 +155,53 @@ int metaTbCursorNext(SMTbCursor *pTbCur) { } SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, bool isinline) { - void *pKey = NULL; - void *pVal = NULL; - int kLen = 0; - int vLen = 0; - int ret; - SSkmDbKey skmDbKey; - SSchemaWrapper *pSW = NULL; - SSchema *pSchema = NULL; - void *pBuf; - SDecoder coder = {0}; - - // fetch - skmDbKey.uid = uid; - skmDbKey.sver = sver; - pKey = &skmDbKey; - kLen = sizeof(skmDbKey); + void *pData = NULL; + int nData = 0; + int64_t version; + SSchemaWrapper schema = {0}; + SSchemaWrapper *pSchema = NULL; + SDecoder dc = {0}; + metaRLock(pMeta); - ret = tdbTbGet(pMeta->pSkmDb, pKey, kLen, &pVal, &vLen); - metaULock(pMeta); 
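One thing to flag in the metaQuery.c hunk above: metaGetTableEntryUidByName resolves the uid from the name index and frees the buffer, but then returns 0 unconditionally, so callers can never see the resolved uid. A standalone sketch of the presumably intended lookup, with a toy in-memory index standing in for pNameIdx/tdbTbGet:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef int64_t tb_uid_t;

/* toy one-entry "name index" standing in for pNameIdx/tdbTbGet */
static int kvGet(const char *name, void **ppVal, int *pLen) {
  if (strcmp(name, "t1") != 0) return -1;
  tb_uid_t *p = malloc(sizeof(*p));
  if (p == NULL) return -1;
  *p = 1001;
  *ppVal = p;
  *pLen = (int)sizeof(*p);
  return 0;
}

tb_uid_t uidByName(const char *name) {
  void    *pData = NULL;
  int      nData = 0;
  tb_uid_t uid   = 0;

  if (kvGet(name, &pData, &nData) == 0) {
    uid = *(tb_uid_t *)pData;    /* copy out before freeing the buffer */
    free(pData);
  }
  return uid;                    /* 0 doubles as "not found" */
}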
- if (ret < 0) { - return NULL; - } + if (sver < 0) { + if (tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), &pData, &nData) < 0) { + goto _err; + } - // decode - pBuf = pVal; - pSW = taosMemoryMalloc(sizeof(SSchemaWrapper)); + version = *(int64_t *)pData; - tDecoderInit(&coder, pVal, vLen); - tDecodeSSchemaWrapper(&coder, pSW); - pSchema = taosMemoryMalloc(sizeof(SSchema) * pSW->nCols); - memcpy(pSchema, pSW->pSchema, sizeof(SSchema) * pSW->nCols); - tDecoderClear(&coder); + tdbTbGet(pMeta->pTbDb, &(STbDbKey){.uid = uid, .version = version}, sizeof(STbDbKey), &pData, &nData); - pSW->pSchema = pSchema; + SMetaEntry me = {0}; + tDecoderInit(&dc, pData, nData); + metaDecodeEntry(&dc, &me); + if (me.type == TSDB_SUPER_TABLE) { + pSchema = tCloneSSchemaWrapper(&me.stbEntry.schemaRow); + } else if (me.type == TSDB_NORMAL_TABLE) { + pSchema = tCloneSSchemaWrapper(&me.ntbEntry.schemaRow); + } else { + ASSERT(0); + } + tDecoderClear(&dc); + } else { + if (tdbTbGet(pMeta->pSkmDb, &(SSkmDbKey){.uid = uid, .sver = sver}, sizeof(SSkmDbKey), &pData, &nData) < 0) { + goto _err; + } - tdbFree(pVal); + tDecoderInit(&dc, pData, nData); + tDecodeSSchemaWrapper(&dc, &schema); + pSchema = tCloneSSchemaWrapper(&schema); + tDecoderClear(&dc); + } - return pSW; + metaULock(pMeta); + tdbFree(pData); + return pSchema; + +_err: + metaULock(pMeta); + tdbFree(pData); + return NULL; } struct SMCtbCursor { @@ -278,12 +300,13 @@ STSchema *metaGetTbTSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver) { pSW = metaGetTableSchema(pMeta, quid, sver, 0); if (!pSW) return NULL; - tdInitTSchemaBuilder(&sb, 0); + tdInitTSchemaBuilder(&sb, pSW->version); for (int i = 0; i < pSW->nCols; i++) { pSchema = pSW->pSchema + i; tdAddColToSchema(&sb, pSchema->type, pSchema->flags, pSchema->colId, pSchema->bytes); } pTSchema = tdGetSchemaFromBuilder(&sb); + tdDestroyTSchemaBuilder(&sb); taosMemoryFree(pSW->pSchema); diff --git a/source/dnode/vnode/src/meta/metaSnapshot.c b/source/dnode/vnode/src/meta/metaSnapshot.c new file mode 100644 index 0000000000000000000000000000000000000000..5757039d55d410808b4eeb57d2e09286b7939004 --- /dev/null +++ b/source/dnode/vnode/src/meta/metaSnapshot.c @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
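The rewritten metaGetTableSchema above takes one of two paths: sver < 0 means "latest", resolved by reading the entry's current version from the uid index and decoding the whole entry, while an explicit sver goes straight to the schema db; both paths funnel failures through a single _err label that unlocks and frees. (The same hunk also plugs a leak in metaGetTbTSchema by destroying the schema builder after use.) A standalone sketch of just the two-path shape, with stubs in place of the tdb and decoder calls:

#include <stdint.h>
#include <stdlib.h>

typedef struct { int32_t version; } SchemaLike;

static int latestVersionOf(int64_t uid, int64_t *ver) {  /* pUidIdx stand-in */
  (void)uid; *ver = 3; return 0;
}
static SchemaLike *schemaFromEntry(int64_t uid, int64_t ver) {  /* pTbDb path */
  (void)uid;
  SchemaLike *s = malloc(sizeof(*s));
  if (s != NULL) s->version = (int32_t)ver;
  return s;
}
static SchemaLike *schemaFromSkmDb(int64_t uid, int32_t sver) { /* pSkmDb path */
  (void)uid;
  SchemaLike *s = malloc(sizeof(*s));
  if (s != NULL) s->version = sver;
  return s;
}

SchemaLike *getTableSchema(int64_t uid, int32_t sver) {
  if (sver < 0) {                        /* "latest": uid idx -> full entry */
    int64_t ver;
    if (latestVersionOf(uid, &ver) < 0) return NULL;
    return schemaFromEntry(uid, ver);
  }
  return schemaFromSkmDb(uid, sver);     /* pinned version: schema db */
}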
+ */ + +#include "meta.h" + +struct SMetaSnapshotReader { + SMeta* pMeta; + TBC* pTbc; + int64_t sver; + int64_t ever; +}; + +int32_t metaSnapshotReaderOpen(SMeta* pMeta, SMetaSnapshotReader** ppReader, int64_t sver, int64_t ever) { + int32_t code = 0; + int32_t c = 0; + SMetaSnapshotReader* pMetaReader = NULL; + + pMetaReader = (SMetaSnapshotReader*)taosMemoryCalloc(1, sizeof(*pMetaReader)); + if (pMetaReader == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + pMetaReader->pMeta = pMeta; + pMetaReader->sver = sver; + pMetaReader->ever = ever; + code = tdbTbcOpen(pMeta->pTbDb, &pMetaReader->pTbc, NULL); + if (code) { + goto _err; + } + + code = tdbTbcMoveTo(pMetaReader->pTbc, &(STbDbKey){.version = sver, .uid = INT64_MIN}, sizeof(STbDbKey), &c); + if (code) { + goto _err; + } + + *ppReader = pMetaReader; + return code; + +_err: + *ppReader = NULL; + return code; +} + +int32_t metaSnapshotReaderClose(SMetaSnapshotReader* pReader) { + if (pReader) { + tdbTbcClose(pReader->pTbc); + taosMemoryFree(pReader); + } + return 0; +} + +int32_t metaSnapshotRead(SMetaSnapshotReader* pReader, void** ppData, uint32_t* nDatap) { + const void* pKey = NULL; + const void* pData = NULL; + int32_t nKey = 0; + int32_t nData = 0; + int32_t code = 0; + + for (;;) { + code = tdbTbcGet(pReader->pTbc, &pKey, &nKey, &pData, &nData); + if (code || ((STbDbKey*)pData)->version > pReader->ever) { + return TSDB_CODE_VND_READ_END; + } + + if (((STbDbKey*)pData)->version < pReader->sver) { + continue; + } + + break; + } + + // copy the data + if (vnodeRealloc(ppData, nData) < 0) { + code = TSDB_CODE_OUT_OF_MEMORY; + return code; + } + + memcpy(*ppData, pData, nData); + *nDatap = nData; + return code; +} \ No newline at end of file diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index f548904a7a575da65a6fe5454e389b4d7706e168..f610f18126ef86a268801f73f5a951c97a380867 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -23,6 +23,7 @@ static int metaUpdateTtlIdx(SMeta *pMeta, const SMetaEntry *pME); static int metaSaveToSkmDb(SMeta *pMeta, const SMetaEntry *pME); static int metaUpdateCtbIdx(SMeta *pMeta, const SMetaEntry *pME); static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry); +static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type); int metaCreateSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { SMetaEntry me = {0}; @@ -30,9 +31,9 @@ int metaCreateSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { int vLen = 0; const void *pKey = NULL; const void *pVal = NULL; - void *pBuf = NULL; + void * pBuf = NULL; int32_t szBuf = 0; - void *p = NULL; + void * p = NULL; SMetaReader mr = {0}; // validate req @@ -55,7 +56,7 @@ int metaCreateSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { me.type = TSDB_SUPER_TABLE; me.uid = pReq->suid; me.name = pReq->name; - me.stbEntry.schema = pReq->schema; + me.stbEntry.schemaRow = pReq->schemaRow; me.stbEntry.schemaTag = pReq->schemaTag; if (metaHandleEntry(pMeta, &me) < 0) goto _err; @@ -71,71 +72,78 @@ _err: } int metaDropSTable(SMeta *pMeta, int64_t verison, SVDropStbReq *pReq) { - TBC *pNameIdxc = NULL; - TBC *pUidIdxc = NULL; - TBC *pCtbIdxc = NULL; - SCtbIdxKey *pCtbIdxKey; - const void *pKey = NULL; - int nKey; - const void *pData = NULL; - int nData; - int c, ret; - - // prepare uid idx cursor - tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, &pMeta->txn); - ret = tdbTbcMoveTo(pUidIdxc, &pReq->suid, sizeof(tb_uid_t), &c); - if 
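metaSnapshotRead above scans entries keyed by (version, uid) and should return only those whose version falls inside [sver, ever]. As written, the loop reads the version out of the value buffer (pData rather than the STbDbKey in pKey) and `continue`s without advancing the cursor; the sketch below assumes the presumably intended key-based, advancing scan. Standalone toy, with an array-backed cursor standing in for the TDB cursor:

#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t version; int64_t uid; } TKeyLike;

/* toy cursor over an array, standing in for tdbTbcNext on pTbDb */
static TKeyLike kTbl[] = {{1, 10}, {2, 11}, {5, 12}, {9, 13}};
static size_t   kPos   = 0;
static int cursorNext(TKeyLike *key) {
  if (kPos >= sizeof(kTbl) / sizeof(kTbl[0])) return -1;
  *key = kTbl[kPos++];                   /* every read advances */
  return 0;
}

static int snapshotRead(int64_t sver, int64_t ever, TKeyLike *out) {
  for (;;) {
    if (cursorNext(out) != 0) return -1;  /* exhausted */
    if (out->version > ever) return -1;   /* past the window: done   */
    if (out->version < sver) continue;    /* before the window: skip */
    return 0;                             /* in [sver, ever]: emit   */
  }
}

int main(void) {
  TKeyLike k;
  while (snapshotRead(2, 5, &k) == 0) printf("%lld\n", (long long)k.version);
  return 0;                               /* prints 2 then 5 */
}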
(ret < 0 || c != 0) { - terrno = TSDB_CODE_VND_TB_NOT_EXIST; - tdbTbcClose(pUidIdxc); - goto _err; - } - - // prepare name idx cursor - tdbTbcOpen(pMeta->pNameIdx, &pNameIdxc, &pMeta->txn); - ret = tdbTbcMoveTo(pNameIdxc, pReq->name, strlen(pReq->name) + 1, &c); - if (ret < 0 || c != 0) { - ASSERT(0); + void *pKey = NULL; + int nKey = 0; + void *pData = NULL; + int nData = 0; + int c = 0; + int rc = 0; + + // check if super table exists + rc = tdbTbGet(pMeta->pNameIdx, pReq->name, strlen(pReq->name) + 1, &pData, &nData); + if (rc < 0 || *(tb_uid_t *)pData != pReq->suid) { + terrno = TSDB_CODE_VND_TABLE_NOT_EXIST; + return -1; } - tdbTbcDelete(pUidIdxc); - tdbTbcDelete(pNameIdxc); - tdbTbcClose(pUidIdxc); - tdbTbcClose(pNameIdxc); + // drop all child tables + TBC * pCtbIdxc = NULL; + SArray *pArray = taosArrayInit(8, sizeof(tb_uid_t)); - // loop to drop each child table tdbTbcOpen(pMeta->pCtbIdx, &pCtbIdxc, &pMeta->txn); - ret = tdbTbcMoveTo(pCtbIdxc, &(SCtbIdxKey){.suid = pReq->suid, .uid = INT64_MIN}, sizeof(SCtbIdxKey), &c); - if (ret < 0 || (c < 0 && tdbTbcMoveToNext(pCtbIdxc) < 0)) { + rc = tdbTbcMoveTo(pCtbIdxc, &(SCtbIdxKey){.suid = pReq->suid, .uid = INT64_MIN}, sizeof(SCtbIdxKey), &c); + if (rc < 0) { tdbTbcClose(pCtbIdxc); - goto _exit; + metaWLock(pMeta); + goto _drop_super_table; } for (;;) { - tdbTbcGet(pCtbIdxc, &pKey, &nKey, NULL, NULL); - pCtbIdxKey = (SCtbIdxKey *)pKey; + rc = tdbTbcNext(pCtbIdxc, &pKey, &nKey, NULL, NULL); + if (rc < 0) break; - if (pCtbIdxKey->suid > pReq->suid) break; + if (((SCtbIdxKey *)pKey)->suid < pReq->suid) { + continue; + } else if (((SCtbIdxKey *)pKey)->suid > pReq->suid) { + break; + } - // drop the child table (TODO) + taosArrayPush(pArray, &(((SCtbIdxKey *)pKey)->uid)); + } + + tdbTbcClose(pCtbIdxc); + + metaWLock(pMeta); - if (tdbTbcMoveToNext(pCtbIdxc) < 0) break; + for (int32_t iChild = 0; iChild < taosArrayGetSize(pArray); iChild++) { + tb_uid_t uid = *(tb_uid_t *)taosArrayGet(pArray, iChild); + metaDropTableByUid(pMeta, uid, NULL); } + taosArrayDestroy(pArray); + + // drop super table +_drop_super_table: + tdbTbGet(pMeta->pUidIdx, &pReq->suid, sizeof(tb_uid_t), &pData, &nData); + tdbTbDelete(pMeta->pTbDb, &(STbDbKey){.version = *(int64_t *)pData, .uid = pReq->suid}, sizeof(STbDbKey), + &pMeta->txn); + tdbTbDelete(pMeta->pNameIdx, pReq->name, strlen(pReq->name) + 1, &pMeta->txn); + tdbTbDelete(pMeta->pUidIdx, &pReq->suid, sizeof(tb_uid_t), &pMeta->txn); + + metaULock(pMeta); + _exit: + tdbFree(pKey); + tdbFree(pData); metaDebug("vgId:%d super table %s uid:%" PRId64 " is dropped", TD_VID(pMeta->pVnode), pReq->name, pReq->suid); return 0; - -_err: - metaError("vgId:%d failed to drop super table %s uid:%" PRId64 " since %s", TD_VID(pMeta->pVnode), pReq->name, - pReq->suid, tstrerror(terrno)); - return -1; } int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { SMetaEntry oStbEntry = {0}; SMetaEntry nStbEntry = {0}; - TBC *pUidIdxc = NULL; - TBC *pTbDbc = NULL; + TBC * pUidIdxc = NULL; + TBC * pTbDbc = NULL; const void *pData; int nData; int64_t oversion; @@ -165,22 +173,22 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { ret = tdbTbcGet(pTbDbc, NULL, NULL, &pData, &nData); ASSERT(ret == 0); - tDecoderInit(&dc, pData, nData); + oStbEntry.pBuf = taosMemoryMalloc(nData); + memcpy(oStbEntry.pBuf, pData, nData); + tDecoderInit(&dc, oStbEntry.pBuf, nData); metaDecodeEntry(&dc, &oStbEntry); nStbEntry.version = version; nStbEntry.type = TSDB_SUPER_TABLE; nStbEntry.uid = pReq->suid; nStbEntry.name = 
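The rewritten metaDropSTable above uses a collect-then-delete shape: child uids are first gathered into an SArray while the ctb-index cursor is open, the cursor is closed, and only then is the write lock taken to drop each child and finally the super table's own rows (the _drop_super_table label). That keeps the scan from observing its own deletions and keeps the lock hold short. A standalone miniature of the pattern:

#include <stdint.h>
#include <stdio.h>

typedef int64_t tb_uid_t;
typedef struct { tb_uid_t suid, uid; } CtbKeyLike;

static CtbKeyLike idx[] = {{1, 100}, {1, 101}, {2, 200}};  /* toy ctb index */

static void wlock(void) {}
static void wunlock(void) {}
static void dropByUid(tb_uid_t uid) { printf("drop %lld\n", (long long)uid); }

void dropSTable(tb_uid_t suid) {
  tb_uid_t buf[8];
  int      n = 0;

  /* phase 1: scan the (suid, uid) index, buffering matches; no lock held */
  for (size_t i = 0; i < sizeof(idx) / sizeof(idx[0]); i++) {
    if (idx[i].suid == suid && n < 8) buf[n++] = idx[i].uid;
  }

  /* phase 2: lock once, drop children, then the super table's own rows */
  wlock();
  for (int i = 0; i < n; i++) dropByUid(buf[i]);
  wunlock();
}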
pReq->name; - nStbEntry.stbEntry.schema = pReq->schema; + nStbEntry.stbEntry.schemaRow = pReq->schemaRow; nStbEntry.stbEntry.schemaTag = pReq->schemaTag; metaWLock(pMeta); // compare two entry - if (oStbEntry.stbEntry.schema.sver != pReq->schema.sver) { - if (oStbEntry.stbEntry.schema.nCols != pReq->schema.nCols) { - metaSaveToSkmDb(pMeta, &nStbEntry); - } + if (oStbEntry.stbEntry.schemaRow.version != pReq->schemaRow.version) { + metaSaveToSkmDb(pMeta, &nStbEntry); } // if (oStbEntry.stbEntry.schemaTag.sver != pReq->schemaTag.sver) { @@ -193,6 +201,7 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { // update uid index tdbTbcUpsert(pUidIdxc, &pReq->suid, sizeof(tb_uid_t), &version, sizeof(version), 0); + if (oStbEntry.pBuf) taosMemoryFree(oStbEntry.pBuf); metaULock(pMeta); tDecoderClear(&dc); tdbTbcClose(pTbDbc); @@ -220,9 +229,6 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) { terrno = TSDB_CODE_TDB_TABLE_ALREADY_EXIST; metaReaderClear(&mr); return -1; - } else { - pReq->uid = tGenIdPI64(); - pReq->ctime = taosGetTimestampMs(); } metaReaderClear(&mr); @@ -239,8 +245,8 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) { } else { me.ntbEntry.ctime = pReq->ctime; me.ntbEntry.ttlDays = pReq->ttl; - me.ntbEntry.schema = pReq->ntb.schema; - me.ntbEntry.ncid = me.ntbEntry.schema.pSchema[me.ntbEntry.schema.nCols - 1].colId + 1; + me.ntbEntry.schemaRow = pReq->ntb.schemaRow; + me.ntbEntry.ncid = me.ntbEntry.schemaRow.pSchema[me.ntbEntry.schemaRow.nCols - 1].colId + 1; } if (metaHandleEntry(pMeta, &me) < 0) goto _err; @@ -256,135 +262,76 @@ _err: } int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUids) { - TBC *pTbDbc = NULL; - TBC *pUidIdxc = NULL; - TBC *pNameIdxc = NULL; - const void *pData; - int nData; - tb_uid_t uid; - int64_t tver; - SMetaEntry me = {0}; - SDecoder coder = {0}; - int8_t type; - int64_t ctime; - tb_uid_t suid; - int c = 0, ret; - - // search & delete the name idx - tdbTbcOpen(pMeta->pNameIdx, &pNameIdxc, &pMeta->txn); - ret = tdbTbcMoveTo(pNameIdxc, pReq->name, strlen(pReq->name) + 1, &c); - if (ret < 0 || !tdbTbcIsValid(pNameIdxc) || c) { - tdbTbcClose(pNameIdxc); + void * pData = NULL; + int nData = 0; + int rc = 0; + tb_uid_t uid; + int type; + + rc = tdbTbGet(pMeta->pNameIdx, pReq->name, strlen(pReq->name) + 1, &pData, &nData); + if (rc < 0) { terrno = TSDB_CODE_VND_TABLE_NOT_EXIST; return -1; } - - ret = tdbTbcGet(pNameIdxc, NULL, NULL, &pData, &nData); - if (ret < 0) { - ASSERT(0); - return -1; - } - uid = *(tb_uid_t *)pData; - tdbTbcDelete(pNameIdxc); - tdbTbcClose(pNameIdxc); - - // search & delete uid idx - tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, &pMeta->txn); - ret = tdbTbcMoveTo(pUidIdxc, &uid, sizeof(uid), &c); - if (ret < 0 || c != 0) { - ASSERT(0); - return -1; - } - - ret = tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData); - if (ret < 0) { - ASSERT(0); - return -1; - } - - tver = *(int64_t *)pData; - tdbTbcDelete(pUidIdxc); - tdbTbcClose(pUidIdxc); - - // search and get meta entry - tdbTbcOpen(pMeta->pTbDb, &pTbDbc, &pMeta->txn); - ret = tdbTbcMoveTo(pTbDbc, &(STbDbKey){.uid = uid, .version = tver}, sizeof(STbDbKey), &c); - if (ret < 0 || c != 0) { - ASSERT(0); - return -1; - } + metaWLock(pMeta); + metaDropTableByUid(pMeta, uid, &type); + metaULock(pMeta); - ret = tdbTbcGet(pTbDbc, NULL, NULL, &pData, &nData); - if (ret < 0) { - ASSERT(0); - return -1; + if (type == TSDB_CHILD_TABLE && tbUids) { + taosArrayPush(tbUids, &uid); } - // decode entry - void 
*pDataCopy = taosMemoryMalloc(nData); // remove the copy (todo) - memcpy(pDataCopy, pData, nData); - tDecoderInit(&coder, pDataCopy, nData); - ret = metaDecodeEntry(&coder, &me); - if (ret < 0) { - ASSERT(0); - return -1; - } + tdbFree(pData); + return 0; +} - type = me.type; - if (type == TSDB_CHILD_TABLE) { - ctime = me.ctbEntry.ctime; - suid = me.ctbEntry.suid; - taosArrayPush(tbUids, &me.uid); - } else if (type == TSDB_NORMAL_TABLE) { - ctime = me.ntbEntry.ctime; - suid = 0; - } else { - ASSERT(0); - } +static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) { + void * pData = NULL; + int nData = 0; + int rc = 0; + int64_t version; + SMetaEntry e = {0}; + SDecoder dc = {0}; - taosMemoryFree(pDataCopy); - tDecoderClear(&coder); - tdbTbcClose(pTbDbc); + rc = tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), &pData, &nData); + version = *(int64_t *)pData; - if (type == TSDB_CHILD_TABLE) { - // remove the pCtbIdx - TBC *pCtbIdxc = NULL; - tdbTbcOpen(pMeta->pCtbIdx, &pCtbIdxc, &pMeta->txn); + tdbTbGet(pMeta->pTbDb, &(STbDbKey){.version = version, .uid = uid}, sizeof(STbDbKey), &pData, &nData); - ret = tdbTbcMoveTo(pCtbIdxc, &(SCtbIdxKey){.suid = suid, .uid = uid}, sizeof(SCtbIdxKey), &c); - if (ret < 0 || c != 0) { - ASSERT(0); - return -1; - } + tDecoderInit(&dc, pData, nData); + metaDecodeEntry(&dc, &e); - tdbTbcDelete(pCtbIdxc); - tdbTbcClose(pCtbIdxc); + if (type) *type = e.type; - // remove tags from pTagIdx (todo) - } else if (type == TSDB_NORMAL_TABLE) { - // remove from pSkmDb - } else { - ASSERT(0); + tdbTbDelete(pMeta->pTbDb, &(STbDbKey){.version = version, .uid = uid}, sizeof(STbDbKey), &pMeta->txn); + tdbTbDelete(pMeta->pNameIdx, e.name, strlen(e.name) + 1, &pMeta->txn); + tdbTbDelete(pMeta->pUidIdx, &uid, sizeof(uid), &pMeta->txn); + if (e.type == TSDB_CHILD_TABLE) { + tdbTbDelete(pMeta->pCtbIdx, &(SCtbIdxKey){.suid = e.ctbEntry.suid, .uid = uid}, sizeof(SCtbIdxKey), &pMeta->txn); + } else if (e.type == TSDB_NORMAL_TABLE) { + // drop schema.db (todo) + // drop ttl.idx (todo) + } else if (e.type == TSDB_SUPER_TABLE) { + // drop schema.db (todo) } - // remove from ttl (todo) - if (ctime > 0) { - } + tDecoderClear(&dc); + tdbFree(pData); return 0; } static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) { - void *pVal = NULL; + void * pVal = NULL; int nVal = 0; - const void *pData = NULL; + const void * pData = NULL; int nData = 0; int ret = 0; tb_uid_t uid; int64_t oversion; - SSchema *pColumn = NULL; + SSchema * pColumn = NULL; SMetaEntry entry = {0}; SSchemaWrapper *pSchema; int c; @@ -420,7 +367,9 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl // get table entry SDecoder dc = {0}; - tDecoderInit(&dc, pData, nData); + entry.pBuf = taosMemoryMalloc(nData); + memcpy(entry.pBuf, pData, nData); + tDecoderInit(&dc, entry.pBuf, nData); ret = metaDecodeEntry(&dc, &entry); ASSERT(ret == 0); @@ -430,7 +379,7 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl } // search the column to add/drop/update - pSchema = &entry.ntbEntry.schema; + pSchema = &entry.ntbEntry.schemaRow; int32_t iCol = 0; for (;;) { pColumn = NULL; @@ -451,16 +400,16 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl terrno = TSDB_CODE_VND_COL_ALREADY_EXISTS; goto _err; } - pSchema->sver++; + pSchema->version++; pSchema->nCols++; pNewSchema = taosMemoryMalloc(sizeof(SSchema) * pSchema->nCols); memcpy(pNewSchema, pSchema->pSchema, sizeof(SSchema) * (pSchema->nCols - 1)); 
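Both metaAlterSTable and metaAlterTableColumn above now copy the fetched record into entry.pBuf before initializing the decoder on it. The decoder borrows pointers into its input rather than copying, so decoding straight out of a tdb-owned buffer risks dangling pointers once the next tdb call reuses it; the private copy is freed only after the decoded entry is done. A standalone sketch of that ownership rule:

#include <stdlib.h>
#include <string.h>

typedef struct { const char *name; void *pBuf; } EntryLike;

static void decodeInto(EntryLike *e, const void *buf) {
  e->name = (const char *)buf;    /* the decoder borrows, it does not copy */
}

int loadEntry(EntryLike *e, const void *pData, size_t nData) {
  e->pBuf = malloc(nData);        /* private copy owned by the entry */
  if (e->pBuf == NULL) return -1;
  memcpy(e->pBuf, pData, nData);
  decodeInto(e, e->pBuf);         /* borrowed pointers stay valid... */
  return 0;
}

void freeEntry(EntryLike *e) { free(e->pBuf); }  /* ...until released here */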
pSchema->pSchema = pNewSchema; - pSchema->pSchema[entry.ntbEntry.schema.nCols - 1].bytes = pAlterTbReq->bytes; - pSchema->pSchema[entry.ntbEntry.schema.nCols - 1].type = pAlterTbReq->type; - pSchema->pSchema[entry.ntbEntry.schema.nCols - 1].flags = pAlterTbReq->flags; - pSchema->pSchema[entry.ntbEntry.schema.nCols - 1].colId = entry.ntbEntry.ncid++; - strcpy(pSchema->pSchema[entry.ntbEntry.schema.nCols - 1].name, pAlterTbReq->colName); + pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].bytes = pAlterTbReq->bytes; + pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].type = pAlterTbReq->type; + pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].flags = pAlterTbReq->flags; + pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].colId = entry.ntbEntry.ncid++; + strcpy(pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].name, pAlterTbReq->colName); break; case TSDB_ALTER_TABLE_DROP_COLUMN: if (pColumn == NULL) { @@ -471,7 +420,7 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl terrno = TSDB_CODE_VND_INVALID_TABLE_ACTION; goto _err; } - pSchema->sver++; + pSchema->version++; tlen = (pSchema->nCols - iCol - 1) * sizeof(SSchema); if (tlen) { memmove(pColumn, pColumn + 1, tlen); @@ -487,7 +436,7 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl terrno = TSDB_CODE_VND_INVALID_TABLE_ACTION; goto _err; } - pSchema->sver++; + pSchema->version++; pColumn->bytes = pAlterTbReq->colModBytes; break; case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME: @@ -495,7 +444,7 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl terrno = TSDB_CODE_VND_TABLE_COL_NOT_EXISTS; goto _err; } - pSchema->sver++; + pSchema->version++; strcpy(pColumn->name, pAlterTbReq->colNewName); break; } @@ -530,7 +479,7 @@ _err: static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) { SMetaEntry ctbEntry = {0}; SMetaEntry stbEntry = {0}; - void *pVal = NULL; + void * pVal = NULL; int nVal = 0; int ret; int c; @@ -561,7 +510,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA oversion = *(int64_t *)pData; // search table.db - TBC *pTbDbc = NULL; + TBC * pTbDbc = NULL; SDecoder dc1 = {0}; SDecoder dc2 = {0}; @@ -585,7 +534,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA metaDecodeEntry(&dc2, &stbEntry); SSchemaWrapper *pTagSchema = &stbEntry.stbEntry.schemaTag; - SSchema *pColumn = NULL; + SSchema * pColumn = NULL; int32_t iCol = 0; for (;;) { pColumn = NULL; @@ -605,31 +554,39 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA if (iCol == 0) { // TODO : need to update tag index } - ctbEntry.version = version; - SKVRowBuilder kvrb = {0}; - const SKVRow pOldTag = (const SKVRow)ctbEntry.ctbEntry.pTags; - SKVRow pNewTag = NULL; - - tdInitKVRowBuilder(&kvrb); - for (int32_t i = 0; i < pTagSchema->nCols; i++) { - SSchema *pCol = &pTagSchema->pSchema[i]; - if (iCol == i) { - tdAddColToKVRow(&kvrb, pCol->colId, pAlterTbReq->pTagVal, pAlterTbReq->nTagVal); - } else { - void *p = tdGetKVRowValOfCol(pOldTag, pCol->colId); - if (p) { - if (IS_VAR_DATA_TYPE(pCol->type)) { - tdAddColToKVRow(&kvrb, pCol->colId, p, varDataTLen(p)); - } else { - tdAddColToKVRow(&kvrb, pCol->colId, p, pCol->bytes); + if (pTagSchema->nCols == 1 && pTagSchema->pSchema[0].type == TSDB_DATA_TYPE_JSON) { + ctbEntry.ctbEntry.pTags = taosMemoryMalloc(pAlterTbReq->nTagVal); + if (ctbEntry.ctbEntry.pTags == NULL) { + terrno = 
TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + memcpy((void *)ctbEntry.ctbEntry.pTags, pAlterTbReq->pTagVal, pAlterTbReq->nTagVal); + } else { + SKVRowBuilder kvrb = {0}; + const SKVRow pOldTag = (const SKVRow)ctbEntry.ctbEntry.pTags; + SKVRow pNewTag = NULL; + + tdInitKVRowBuilder(&kvrb); + for (int32_t i = 0; i < pTagSchema->nCols; i++) { + SSchema *pCol = &pTagSchema->pSchema[i]; + if (iCol == i) { + tdAddColToKVRow(&kvrb, pCol->colId, pAlterTbReq->pTagVal, pAlterTbReq->nTagVal); + } else { + void *p = tdGetKVRowValOfCol(pOldTag, pCol->colId); + if (p) { + if (IS_VAR_DATA_TYPE(pCol->type)) { + tdAddColToKVRow(&kvrb, pCol->colId, p, varDataTLen(p)); + } else { + tdAddColToKVRow(&kvrb, pCol->colId, p, pCol->bytes); + } } } } - } - ctbEntry.ctbEntry.pTags = tdGetKVRowFromBuilder(&kvrb); - tdDestroyKVRowBuilder(&kvrb); + ctbEntry.ctbEntry.pTags = tdGetKVRowFromBuilder(&kvrb); + tdDestroyKVRowBuilder(&kvrb); + } // save to table.db metaSaveToTbDb(pMeta, &ctbEntry); @@ -639,6 +596,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA tDecoderClear(&dc1); tDecoderClear(&dc2); + if (ctbEntry.ctbEntry.pTags) taosMemoryFree((void *)ctbEntry.ctbEntry.pTags); if (ctbEntry.pBuf) taosMemoryFree(ctbEntry.pBuf); if (stbEntry.pBuf) tdbFree(stbEntry.pBuf); tdbTbcClose(pTbDbc); @@ -681,8 +639,8 @@ int metaAlterTable(SMeta *pMeta, int64_t version, SVAlterTbReq *pReq) { static int metaSaveToTbDb(SMeta *pMeta, const SMetaEntry *pME) { STbDbKey tbDbKey; - void *pKey = NULL; - void *pVal = NULL; + void * pKey = NULL; + void * pVal = NULL; int kLen = 0; int vLen = 0; SEncoder coder = {0}; @@ -797,14 +755,14 @@ static void metaDestroyTagIdxKey(STagIdxKey *pTagIdxKey) { } static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { - void *pData = NULL; + void * pData = NULL; int nData = 0; STbDbKey tbDbKey = {0}; SMetaEntry stbEntry = {0}; - STagIdxKey *pTagIdxKey = NULL; + STagIdxKey * pTagIdxKey = NULL; int32_t nTagIdxKey; const SSchema *pTagColumn; // = &stbEntry.stbEntry.schema.pSchema[0]; - const void *pTagData = NULL; // + const void * pTagData = NULL; // SDecoder dc = {0}; // get super table @@ -820,37 +778,48 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { pTagData = tdGetKVRowValOfCol((const SKVRow)pCtbEntry->ctbEntry.pTags, pTagColumn->colId); // update tag index +#ifdef USE_INVERTED_INDEX + tb_uid_t suid = pCtbEntry->ctbEntry.suid; + tb_uid_t tuid = pCtbEntry->uid; + + SIndexMultiTerm *tmGroup = indexMultiTermCreate(); + + SIndexTerm *tm = indexTermCreate(suid, ADD_VALUE, pTagColumn->type, pTagColumn->name, sizeof(pTagColumn->name), + pTagData, pTagData == NULL ? 
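The tag-update branch above special-cases a schema whose only tag column is JSON: the tag payload is stored as one opaque blob, so an update is a whole-blob replacement, while multi-column tags are re-assembled through the KV-row builder column by column. A standalone sketch of the branch; the type constant and structs are illustrative:

#include <stdlib.h>
#include <string.h>

enum { TYPE_JSON = 15 };                        /* illustrative constant */
typedef struct { int nCols; int type0; } TagSchemaLike;

void *updateTags(const TagSchemaLike *s, const void *oldTags,
                 const void *newVal, size_t nNewVal) {
  if (s->nCols == 1 && s->type0 == TYPE_JSON) {
    void *p = malloc(nNewVal);                  /* whole-blob replacement */
    if (p != NULL) memcpy(p, newVal, nNewVal);
    return p;
  }
  /* multi-column path: rebuild the row a la tdInitKVRowBuilder() --
   * copy unchanged columns from oldTags, splice newVal into the altered
   * column, then build the new row. Omitted in this sketch. */
  (void)oldTags;
  return NULL;
}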
0 : strlen(pTagData)); + indexMultiTermAdd(tmGroup, tm); + int ret = indexPut((SIndex *)pMeta->pTagIvtIdx, tmGroup, tuid); + indexMultiTermDestroy(tmGroup); +#else if (metaCreateTagIdxKey(pCtbEntry->ctbEntry.suid, pTagColumn->colId, pTagData, pTagColumn->type, pCtbEntry->uid, &pTagIdxKey, &nTagIdxKey) < 0) { return -1; } tdbTbInsert(pMeta->pTagIdx, pTagIdxKey, nTagIdxKey, NULL, 0, &pMeta->txn); metaDestroyTagIdxKey(pTagIdxKey); - +#endif tDecoderClear(&dc); tdbFree(pData); - return 0; } static int metaSaveToSkmDb(SMeta *pMeta, const SMetaEntry *pME) { SEncoder coder = {0}; - void *pVal = NULL; + void * pVal = NULL; int vLen = 0; int rcode = 0; SSkmDbKey skmDbKey = {0}; const SSchemaWrapper *pSW; if (pME->type == TSDB_SUPER_TABLE) { - pSW = &pME->stbEntry.schema; + pSW = &pME->stbEntry.schemaRow; } else if (pME->type == TSDB_NORMAL_TABLE) { - pSW = &pME->ntbEntry.schema; + pSW = &pME->ntbEntry.schemaRow; } else { ASSERT(0); } skmDbKey.uid = pME->uid; - skmDbKey.sver = pSW->sver; + skmDbKey.sver = pSW->version; // encode schema int32_t ret = 0; @@ -911,3 +880,11 @@ _err: metaULock(pMeta); return -1; } +// refactor later +void *metaGetIdx(SMeta *pMeta) { +#ifdef USE_INVERTED_INDEX + return pMeta->pTagIvtIdx; +#else + return pMeta->pTagIdx; +#endif +} diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index 88af049d0bd298e58e51286e0980fd13a7872734..0769da12bcb478ace23e01be4be0fd9b75da5249 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -18,7 +18,7 @@ static FORCE_INLINE int32_t tdUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid); static FORCE_INLINE int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids); static FORCE_INLINE int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType, qTaskInfo_t *taskInfo, - STSchema *pTSchema, tb_uid_t suid, tb_uid_t uid, int8_t level); + STSchema *pTSchema, tb_uid_t suid, int8_t level); struct SRSmaInfo { void *taskInfo[TSDB_RETENTION_L2]; // qTaskInfo_t @@ -364,7 +364,7 @@ static int32_t tdFetchSubmitReqSuids(SSubmitReq *pMsg, STbUidStore *pStore) { } static FORCE_INLINE int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType, qTaskInfo_t *taskInfo, - STSchema *pTSchema, tb_uid_t suid, tb_uid_t uid, int8_t level) { + STSchema *pTSchema, tb_uid_t suid, int8_t level) { SArray *pResult = NULL; if (!taskInfo) { @@ -374,7 +374,7 @@ static FORCE_INLINE int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int3 smaDebug("vgId:%d execute rsma %" PRIi8 " task for qTaskInfo:%p suid:%" PRIu64, SMA_VID(pSma), level, taskInfo, suid); - qSetStreamInput(taskInfo, pMsg, inputType); + qSetStreamInput(taskInfo, pMsg, inputType, true); while (1) { SSDataBlock *output = NULL; uint64_t ts; @@ -399,7 +399,7 @@ static FORCE_INLINE int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int3 blockDebugShowData(pResult); STsdb *sinkTsdb = (level == TSDB_RETENTION_L1 ? 
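Worth flagging in metaUpdateTagIdx above: the inverted-index term is created with sizeof(pTagColumn->name), the declared size of the name array, rather than strlen(), so the term key would carry whatever bytes sit past the terminating NUL. A two-line illustration of the difference (array size here is illustrative):

#include <stdio.h>
#include <string.h>

typedef struct { char name[64]; } SSchemaLike;  /* array size illustrative */

int main(void) {
  SSchemaLike col = {.name = "city"};
  printf("sizeof = %zu, strlen = %zu\n",
         sizeof(col.name), strlen(col.name));   /* e.g. 64 vs 4 */
  return 0;
}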
pSma->pRSmaTsdb1 : pSma->pRSmaTsdb2); SSubmitReq *pReq = NULL; - if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, SMA_VID(pSma), uid, suid) != 0) { + if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, SMA_VID(pSma), suid) != 0) { taosArrayDestroy(pResult); return TSDB_CODE_FAILED; } @@ -418,15 +418,13 @@ static FORCE_INLINE int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int3 return TSDB_CODE_SUCCESS; } -static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid, tb_uid_t uid) { +static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid) { SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); if (!pEnv) { // only applicable when rsma env exists return TSDB_CODE_SUCCESS; } - ASSERT(uid != 0); // TODO: remove later - SSmaStat *pStat = SMA_ENV_STAT(pEnv); SRSmaInfo *pRSmaInfo = NULL; @@ -443,13 +441,13 @@ static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb if (inputType == STREAM_DATA_TYPE_SUBMIT_BLOCK) { // TODO: use the proper schema instead of 0, and cache STSchema in cache - STSchema *pTSchema = metaGetTbTSchema(SMA_META(pSma), suid, 1); + STSchema *pTSchema = metaGetTbTSchema(SMA_META(pSma), suid, -1); if (!pTSchema) { terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; return TSDB_CODE_FAILED; } - tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo->taskInfo[0], pTSchema, suid, uid, TSDB_RETENTION_L1); - tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo->taskInfo[1], pTSchema, suid, uid, TSDB_RETENTION_L2); + tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo->taskInfo[0], pTSchema, suid, TSDB_RETENTION_L1); + tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo->taskInfo[1], pTSchema, suid, TSDB_RETENTION_L2); taosMemoryFree(pTSchema); } @@ -468,12 +466,12 @@ int32_t tdProcessRSmaSubmit(SSma *pSma, void *pMsg, int32_t inputType) { tdFetchSubmitReqSuids(pMsg, &uidStore); if (uidStore.suid != 0) { - tdExecuteRSma(pSma, pMsg, inputType, uidStore.suid, uidStore.uid); + tdExecuteRSma(pSma, pMsg, inputType, uidStore.suid); void *pIter = taosHashIterate(uidStore.uidHash, NULL); while (pIter) { tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL); - tdExecuteRSma(pSma, pMsg, inputType, *pTbSuid, 0); + tdExecuteRSma(pSma, pMsg, inputType, *pTbSuid); pIter = taosHashIterate(uidStore.uidHash, pIter); } diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 0671044bad2f3bb0aa79ec06201375c9f812b11c..96ce6e8eeeeaf17243d8e29baa733c369437c931 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -14,14 +14,83 @@ */ #include "tq.h" -#include "tqueue.h" +#include "tdbInt.h" int32_t tqInit() { - // + int8_t old; + while (1) { + old = atomic_val_compare_exchange_8(&tqMgmt.inited, 0, 2); + if (old != 2) break; + } + + if (old == 0) { + tqMgmt.timer = taosTmrInit(10000, 100, 10000, "TQ"); + if (tqMgmt.timer == NULL) { + atomic_store_8(&tqMgmt.inited, 0); + return -1; + } + atomic_store_8(&tqMgmt.inited, 1); + } return 0; } -void tqCleanUp() {} +void tqCleanUp() { + int8_t old; + while (1) { + old = atomic_val_compare_exchange_8(&tqMgmt.inited, 1, 2); + if (old != 2) break; + } + + if (old == 1) { + taosTmrCleanUp(tqMgmt.timer); + atomic_store_8(&tqMgmt.inited, 0); + } +} + +int tqExecKeyCompare(const void* pKey1, int32_t kLen1, const void* pKey2, int32_t kLen2) { + return strcmp(pKey1, pKey2); +} + +int32_t tqStoreExec(STQ* pTq, const char* key, const STqExec* pExec) { + int32_t code; + int32_t vlen; + tEncodeSize(tEncodeSTqExec, pExec, vlen, code); + 
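tqInit/tqCleanUp above use a three-state handshake on tqMgmt.inited: 0 = uninitialized, 1 = ready, 2 = a thread is mid-transition; losers of the compare-and-swap spin only while the state is 2. A standalone rendering of the same logic, with C11 stdatomic standing in for the taos atomic helpers:

#include <stdatomic.h>

static atomic_char gState;       /* zero-initialized: "uninitialized" */

int initOnce(void) {
  char old;
  for (;;) {
    old = 0;
    if (atomic_compare_exchange_strong(&gState, &old, 2)) break; /* won 0->2 */
    if (old != 2) break;         /* old == 1: already initialized, skip */
  }
  if (old == 0) {
    /* ...one-time setup (e.g. the TQ timer); on failure store 0 back... */
    atomic_store(&gState, 1);    /* publish "ready" */
  }
  return 0;
}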
ASSERT(code == 0); + + void* buf = taosMemoryCalloc(1, vlen); + if (buf == NULL) { + ASSERT(0); + } + + SEncoder encoder; + tEncoderInit(&encoder, buf, vlen); + + if (tEncodeSTqExec(&encoder, pExec) < 0) { + ASSERT(0); + } + + TXN txn; + + if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) { + ASSERT(0); + } + + if (tdbBegin(pTq->pMetaStore, &txn) < 0) { + ASSERT(0); + } + + if (tdbTbUpsert(pTq->pExecStore, key, (int)strlen(key), buf, vlen, &txn) < 0) { + ASSERT(0); + } + + if (tdbCommit(pTq->pMetaStore, &txn) < 0) { + ASSERT(0); + } + + tEncoderClear(&encoder); + taosMemoryFree(buf); + return 0; +} STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal) { STQ* pTq = taosMemoryMalloc(sizeof(STQ)); @@ -32,24 +101,72 @@ STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal) { pTq->path = strdup(path); pTq->pVnode = pVnode; pTq->pWal = pWal; - /*if (tdbOpen(path, 4096, 1, &pTq->pTdb) < 0) {*/ + + pTq->execs = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK); + + pTq->pStreamTasks = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); + + pTq->pushMgr = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK); + + if (tdbOpen(path, 16 * 1024, 1, &pTq->pMetaStore) < 0) { + ASSERT(0); + } + + if (tdbTbOpen("exec", -1, -1, tqExecKeyCompare, pTq->pMetaStore, &pTq->pExecStore) < 0) { + ASSERT(0); + } + + TXN txn; + + if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, 0) < 0) { + ASSERT(0); + } + + /*if (tdbBegin(pTq->pMetaStore, &txn) < 0) {*/ /*ASSERT(0);*/ /*}*/ -#if 0 - pTq->tqMeta = tqStoreOpen(pTq, path, (FTqSerialize)tqSerializeConsumer, (FTqDeserialize)tqDeserializeConsumer, - (FTqDelete)taosMemoryFree, 0); - if (pTq->tqMeta == NULL) { - taosMemoryFree(pTq); - return NULL; + TBC* pCur; + if (tdbTbcOpen(pTq->pExecStore, &pCur, &txn) < 0) { + ASSERT(0); } -#endif - pTq->execs = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK); + void* pKey; + int kLen; + void* pVal; + int vLen; - pTq->pStreamTasks = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); + tdbTbcMoveToFirst(pCur); + SDecoder decoder; + while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) { + STqExec exec; + tDecoderInit(&decoder, (uint8_t*)pVal, vLen); + tDecodeSTqExec(&decoder, &exec); + exec.pWalReader = walOpenReadHandle(pTq->pVnode->pWal); + if (exec.subType == TOPIC_SUB_TYPE__TABLE) { + for (int32_t i = 0; i < 5; i++) { + exec.pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta); - pTq->pushMgr = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK); + SReadHandle handle = { + .reader = exec.pExecReader[i], + .meta = pTq->pVnode->pMeta, + .pMsgCb = &pTq->pVnode->msgCb, + }; + exec.task[i] = qCreateStreamExecTaskInfo(exec.qmsg, &handle); + ASSERT(exec.task[i]); + } + } else { + for (int32_t i = 0; i < 5; i++) { + exec.pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta); + } + exec.pDropTbUid = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + } + taosHashPut(pTq->execs, pKey, kLen, &exec, sizeof(STqExec)); + } + + if (tdbTxnClose(&txn) < 0) { + ASSERT(0); + } return pTq; } @@ -60,53 +177,43 @@ void tqClose(STQ* pTq) { taosHashCleanup(pTq->execs); taosHashCleanup(pTq->pStreamTasks); taosHashCleanup(pTq->pushMgr); + tdbClose(pTq->pMetaStore); taosMemoryFree(pTq); } // TODO } -static void tdSRowDemo() { -#define DEMO_N_COLS 3 - - int16_t 
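tqStoreExec above serializes in two passes: tEncodeSize runs the encoder in measure-only mode to obtain vlen, the buffer is allocated at exactly that size, and the real encode fills it before the tdb upsert and commit. A standalone miniature of the measure-then-encode idiom (the NULL-buffer encoder stands in for tEncodeSize):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct { uint8_t *buf; int pos; } EncLike;

static void putI64(EncLike *e, int64_t v) {
  if (e->buf) memcpy(e->buf + e->pos, &v, sizeof(v));  /* NULL buf: size only */
  e->pos += (int)sizeof(v);
}

static int encodeObj(EncLike *e, int64_t a, int64_t b) {
  putI64(e, a);
  putI64(e, b);
  return e->pos;
}

int storeObj(int64_t a, int64_t b, void **pOut, int *pLen) {
  EncLike sizer = {NULL, 0};
  int vlen = encodeObj(&sizer, a, b);        /* pass 1: measure */

  uint8_t *buf = calloc(1, (size_t)vlen);
  if (buf == NULL) return -1;

  EncLike enc = {buf, 0};
  encodeObj(&enc, a, b);                     /* pass 2: encode for real */
  *pOut = buf;
  *pLen = vlen;
  return 0;                                  /* caller upserts + commits */
}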
schemaVersion = 0; - int32_t numOfCols = DEMO_N_COLS; // ts + int - SRowBuilder rb = {0}; - - SSchema schema[DEMO_N_COLS] = { - {.type = TSDB_DATA_TYPE_TIMESTAMP, .colId = 1, .name = "ts", .bytes = 8, .flags = COL_SMA_ON}, - {.type = TSDB_DATA_TYPE_INT, .colId = 2, .name = "c1", .bytes = 4, .flags = COL_SMA_ON}, - {.type = TSDB_DATA_TYPE_INT, .colId = 3, .name = "c2", .bytes = 4, .flags = COL_SMA_ON}}; - - SSchema* pSchema = schema; - STSchema* pTSChema = tdGetSTSChemaFromSSChema(&pSchema, numOfCols); - - tdSRowInit(&rb, schemaVersion); - tdSRowSetTpInfo(&rb, numOfCols, pTSChema->flen); - int32_t maxLen = TD_ROW_MAX_BYTES_FROM_SCHEMA(pTSChema); - void* row = taosMemoryCalloc(1, maxLen); // make sure the buffer is enough - - // set row buf - tdSRowResetBuf(&rb, row); - - for (int32_t idx = 0; idx < pTSChema->numOfCols; ++idx) { - STColumn* pColumn = pTSChema->columns + idx; - if (idx == 0) { - int64_t tsKey = 1651234567; - tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, &tsKey, true, pColumn->offset, idx); - } else if (idx == 1) { - int32_t val1 = 10; - tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, &val1, true, pColumn->offset, idx); - } else { - tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NONE, NULL, true, pColumn->offset, idx); - } - } - - // print - tdSRowPrint(row, pTSChema, __func__); - - taosMemoryFree(pTSChema); +int32_t tEncodeSTqExec(SEncoder* pEncoder, const STqExec* pExec) { + if (tStartEncode(pEncoder) < 0) return -1; + if (tEncodeCStr(pEncoder, pExec->subKey) < 0) return -1; + if (tEncodeI64(pEncoder, pExec->consumerId) < 0) return -1; + if (tEncodeI32(pEncoder, pExec->epoch) < 0) return -1; + if (tEncodeI8(pEncoder, pExec->subType) < 0) return -1; + if (tEncodeI8(pEncoder, pExec->withTbName) < 0) return -1; + if (tEncodeI8(pEncoder, pExec->withSchema) < 0) return -1; + if (tEncodeI8(pEncoder, pExec->withTag) < 0) return -1; + if (pExec->subType == TOPIC_SUB_TYPE__TABLE) { + if (tEncodeCStr(pEncoder, pExec->qmsg) < 0) return -1; + } + tEndEncode(pEncoder); + return pEncoder->pos; +} + +int32_t tDecodeSTqExec(SDecoder* pDecoder, STqExec* pExec) { + if (tStartDecode(pDecoder) < 0) return -1; + if (tDecodeCStrTo(pDecoder, pExec->subKey) < 0) return -1; + if (tDecodeI64(pDecoder, &pExec->consumerId) < 0) return -1; + if (tDecodeI32(pDecoder, &pExec->epoch) < 0) return -1; + if (tDecodeI8(pDecoder, &pExec->subType) < 0) return -1; + if (tDecodeI8(pDecoder, &pExec->withTbName) < 0) return -1; + if (tDecodeI8(pDecoder, &pExec->withSchema) < 0) return -1; + if (tDecodeI8(pDecoder, &pExec->withTag) < 0) return -1; + if (pExec->subType == TOPIC_SUB_TYPE__TABLE) { + if (tDecodeCStrAlloc(pDecoder, &pExec->qmsg) < 0) return -1; + } + tEndDecode(pDecoder); + return 0; } - int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) { void* pIter = NULL; while (1) { @@ -128,6 +235,15 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) { } } } + while (1) { + pIter = taosHashIterate(pTq->pStreamTasks, pIter); + if (pIter == NULL) break; + SStreamTask* pTask = (SStreamTask*)pIter; + if (pTask->inputType == STREAM_INPUT__DATA_SUBMIT) { + int32_t code = qUpdateQualifiedTableId(pTask->exec.executor, tbUidList, isAdd); + ASSERT(code == 0); + } + } return 0; } @@ -157,7 +273,7 @@ int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_ if (pExec->subType == TOPIC_SUB_TYPE__TABLE) { qTaskInfo_t task = pExec->task[workerId]; ASSERT(task); - qSetStreamInput(task, pReq, 
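tEncodeSTqExec/tDecodeSTqExec above are mirror images: fields are read in exactly the order they were written, and the optional qmsg field is guarded by the same subType test on both sides; a one-sided change would silently corrupt every record decoded afterwards. A standalone toy showing the invariant, with an assert as the usage check:

#include <stdint.h>
#include <string.h>
#include <assert.h>

typedef struct { int8_t subType; int64_t consumerId; int32_t extra; } ObjLike;
enum { SUB_TYPE_TABLE = 1 };

static int enc(uint8_t *buf, const ObjLike *o) {
  int p = 0;
  memcpy(buf + p, &o->subType, 1);    p += 1;
  memcpy(buf + p, &o->consumerId, 8); p += 8;
  if (o->subType == SUB_TYPE_TABLE) {            /* conditional field */
    memcpy(buf + p, &o->extra, 4);    p += 4;
  }
  return p;
}

static int dec(const uint8_t *buf, ObjLike *o) {
  int p = 0;
  memcpy(&o->subType, buf + p, 1);    p += 1;
  memcpy(&o->consumerId, buf + p, 8); p += 8;
  if (o->subType == SUB_TYPE_TABLE) {            /* same guard, same order */
    memcpy(&o->extra, buf + p, 4);    p += 4;
  }
  return p;
}

int main(void) {
  uint8_t buf[16];
  ObjLike a = {SUB_TYPE_TABLE, 42, 7}, b = {0};
  assert(enc(buf, &a) == dec(buf, &b));          /* symmetric byte counts */
  assert(b.consumerId == 42 && b.extra == 7);
  return 0;
}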
STREAM_DATA_TYPE_SUBMIT_BLOCK); + qSetStreamInput(task, pReq, STREAM_DATA_TYPE_SUBMIT_BLOCK, false); while (1) { SSDataBlock* pDataBlock = NULL; uint64_t ts = 0; @@ -261,166 +377,26 @@ int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_ } int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) { - if (msgType != TDMT_VND_SUBMIT) return 0; - - // make sure msgType == TDMT_VND_SUBMIT - if (tdUpdateExpireWindow(pTq->pVnode->pSma, msg, ver) != 0) { - return -1; - } + if (msgType == TDMT_VND_SUBMIT) { + if (taosHashGetSize(pTq->pStreamTasks) == 0) return 0; - if (taosHashGetSize(pTq->pStreamTasks) == 0) return 0; + if (tdUpdateExpireWindow(pTq->pVnode->pSma, msg, ver) != 0) { + // TODO handle sma error + } + void* data = taosMemoryMalloc(msgLen); + if (data == NULL) { + return -1; + } + memcpy(data, msg, msgLen); - void* data = taosMemoryMalloc(msgLen); - if (data == NULL) { - return -1; + tqProcessStreamTrigger(pTq, data); } - memcpy(data, msg, msgLen); - - tqProcessStreamTriggerNew(pTq, data); - -#if 0 - SRpcMsg req = { - .msgType = TDMT_VND_STREAM_TRIGGER, - .pCont = data, - .contLen = msgLen, - }; - - tmsgPutToQueue(&pTq->pVnode->msgCb, FETCH_QUEUE, &req); -#endif return 0; } int tqCommit(STQ* pTq) { // do nothing - /*return tqStorePersist(pTq->tqMeta);*/ - return 0; -} - -int32_t tqGetTopicHandleSize(const STqTopic* pTopic) { - return strlen(pTopic->topicName) + strlen(pTopic->sql) + strlen(pTopic->physicalPlan) + strlen(pTopic->qmsg) + - sizeof(int64_t) * 3; -} - -int32_t tqGetConsumerHandleSize(const STqConsumer* pConsumer) { - int num = taosArrayGetSize(pConsumer->topics); - int32_t sz = 0; - for (int i = 0; i < num; i++) { - STqTopic* pTopic = taosArrayGet(pConsumer->topics, i); - sz += tqGetTopicHandleSize(pTopic); - } - return sz; -} - -static FORCE_INLINE int32_t tEncodeSTqTopic(void** buf, const STqTopic* pTopic) { - int32_t tlen = 0; - tlen += taosEncodeString(buf, pTopic->topicName); - /*tlen += taosEncodeString(buf, pTopic->sql);*/ - /*tlen += taosEncodeString(buf, pTopic->physicalPlan);*/ - tlen += taosEncodeString(buf, pTopic->qmsg); - /*tlen += taosEncodeFixedI64(buf, pTopic->persistedOffset);*/ - /*tlen += taosEncodeFixedI64(buf, pTopic->committedOffset);*/ - /*tlen += taosEncodeFixedI64(buf, pTopic->currentOffset);*/ - return tlen; -} - -static FORCE_INLINE const void* tDecodeSTqTopic(const void* buf, STqTopic* pTopic) { - buf = taosDecodeStringTo(buf, pTopic->topicName); - /*buf = taosDecodeString(buf, &pTopic->sql);*/ - /*buf = taosDecodeString(buf, &pTopic->physicalPlan);*/ - buf = taosDecodeString(buf, &pTopic->qmsg); - /*buf = taosDecodeFixedI64(buf, &pTopic->persistedOffset);*/ - /*buf = taosDecodeFixedI64(buf, &pTopic->committedOffset);*/ - /*buf = taosDecodeFixedI64(buf, &pTopic->currentOffset);*/ - return buf; -} - -static FORCE_INLINE int32_t tEncodeSTqConsumer(void** buf, const STqConsumer* pConsumer) { - int32_t sz; - - int32_t tlen = 0; - tlen += taosEncodeFixedI64(buf, pConsumer->consumerId); - tlen += taosEncodeFixedI32(buf, pConsumer->epoch); - tlen += taosEncodeString(buf, pConsumer->cgroup); - sz = taosArrayGetSize(pConsumer->topics); - tlen += taosEncodeFixedI32(buf, sz); - for (int32_t i = 0; i < sz; i++) { - STqTopic* pTopic = taosArrayGet(pConsumer->topics, i); - tlen += tEncodeSTqTopic(buf, pTopic); - } - return tlen; -} - -static FORCE_INLINE const void* tDecodeSTqConsumer(const void* buf, STqConsumer* pConsumer) { - int32_t sz; - - buf = taosDecodeFixedI64(buf, &pConsumer->consumerId); - buf = 
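The reworked tqPushMsg above copies the WAL-owned submit message before handing it to tqProcessStreamTrigger, since the caller's buffer is only borrowed for the duration of the call. A minimal statement of that ownership rule (standalone sketch, not the TQ API):

#include <stdlib.h>
#include <string.h>

int pushMsg(void (*trigger)(void *msg), const void *msg, size_t len) {
  void *copy = malloc(len);
  if (copy == NULL) return -1;   /* propagate OOM, leave caller's msg alone */
  memcpy(copy, msg, len);
  trigger(copy);                 /* consumer now owns and frees `copy` */
  return 0;
}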
taosDecodeFixedI32(buf, &pConsumer->epoch); - buf = taosDecodeStringTo(buf, pConsumer->cgroup); - buf = taosDecodeFixedI32(buf, &sz); - pConsumer->topics = taosArrayInit(sz, sizeof(STqTopic)); - if (pConsumer->topics == NULL) return NULL; - for (int32_t i = 0; i < sz; i++) { - STqTopic pTopic; - buf = tDecodeSTqTopic(buf, &pTopic); - taosArrayPush(pConsumer->topics, &pTopic); - } - return buf; -} - -int tqSerializeConsumer(const STqConsumer* pConsumer, STqSerializedHead** ppHead) { - int32_t sz = tEncodeSTqConsumer(NULL, pConsumer); - - if (sz > (*ppHead)->ssize) { - void* tmpPtr = taosMemoryRealloc(*ppHead, sizeof(STqSerializedHead) + sz); - if (tmpPtr == NULL) { - taosMemoryFree(*ppHead); - terrno = TSDB_CODE_TQ_OUT_OF_MEMORY; - return -1; - } - *ppHead = tmpPtr; - (*ppHead)->ssize = sz; - } - - void* ptr = (*ppHead)->content; - void* abuf = ptr; - tEncodeSTqConsumer(&abuf, pConsumer); - - return 0; -} - -int32_t tqDeserializeConsumer(STQ* pTq, const STqSerializedHead* pHead, STqConsumer** ppConsumer) { - const void* str = pHead->content; - *ppConsumer = taosMemoryCalloc(1, sizeof(STqConsumer)); - if (*ppConsumer == NULL) { - terrno = TSDB_CODE_TQ_OUT_OF_MEMORY; - return -1; - } - if (tDecodeSTqConsumer(str, *ppConsumer) == NULL) { - terrno = TSDB_CODE_TQ_OUT_OF_MEMORY; - return -1; - } - STqConsumer* pConsumer = *ppConsumer; - int32_t sz = taosArrayGetSize(pConsumer->topics); - for (int32_t i = 0; i < sz; i++) { - STqTopic* pTopic = taosArrayGet(pConsumer->topics, i); - pTopic->pReadhandle = walOpenReadHandle(pTq->pWal); - if (pTopic->pReadhandle == NULL) { - ASSERT(false); - } - for (int j = 0; j < TQ_BUFFER_SIZE; j++) { - pTopic->buffer.output[j].status = 0; - STqReadHandle* pReadHandle = tqInitSubmitMsgScanner(pTq->pVnode->pMeta); - SReadHandle handle = { - .reader = pReadHandle, - .meta = pTq->pVnode->pMeta, - .pMsgCb = &pTq->pVnode->msgCb, - }; - pTopic->buffer.output[j].pReadHandle = pReadHandle; - pTopic->buffer.output[j].task = qCreateStreamExecTaskInfo(pTopic->qmsg, &handle); - } - } - return 0; } @@ -543,7 +519,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { if (pExec->subType == TOPIC_SUB_TYPE__TABLE) { qTaskInfo_t task = pExec->task[workerId]; ASSERT(task); - qSetStreamInput(task, pCont, STREAM_DATA_TYPE_SUBMIT_BLOCK); + qSetStreamInput(task, pCont, STREAM_DATA_TYPE_SUBMIT_BLOCK, false); while (1) { SSDataBlock* pDataBlock = NULL; uint64_t ts = 0; @@ -600,6 +576,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { SSDataBlock block = {0}; if (tqRetrieveDataBlock(&block.pDataBlock, pReader, &block.info.groupId, &block.info.uid, &block.info.rows, &block.info.numOfCols) < 0) { + if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue; ASSERT(0); } int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(&block); @@ -684,220 +661,32 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { return 0; } -#if 0 -int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { - SMqPollReq* pReq = pMsg->pCont; - int64_t consumerId = pReq->consumerId; - int64_t fetchOffset; - int64_t blockingTime = pReq->blockingTime; - int32_t reqEpoch = pReq->epoch; - - if (pReq->currentOffset == TMQ_CONF__RESET_OFFSET__EARLIEAST) { - fetchOffset = walGetFirstVer(pTq->pWal); - } else if (pReq->currentOffset == TMQ_CONF__RESET_OFFSET__LATEST) { - fetchOffset = walGetLastVer(pTq->pWal); - } else { - fetchOffset = pReq->currentOffset + 1; - } +int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen) { + 
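/* [editor's note] The TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND check added to the
 * poll loop above turns a hard assert into a soft skip: a submit block in the
 * WAL may reference a table that was dropped after the data was written, and
 * the consumer should simply move past that block. The shape of the loop, with
 * hypothetical stand-ins (hasNextBlock, retrieveBlock) for the real iterator
 * and tqRetrieveDataBlock: */
extern bool    hasNextBlock(void);   // hypothetical block iterator
extern int32_t retrieveBlock(void);  // stands in for tqRetrieveDataBlock

void drainBlocks(void) {
  while (hasNextBlock()) {
    if (retrieveBlock() < 0) {
      if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue;  // dropped table: skip
      ASSERT(0);  // any other failure is a real bug
    }
    // ... encode the block into the poll response ...
  }
}
/* [end editor's note] */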
SMqVDeleteReq* pReq = (SMqVDeleteReq*)msg; - tqDebug("tmq poll: consumer %ld (epoch %d) recv poll req in vg %d, req %ld %ld", consumerId, pReq->epoch, - TD_VID(pTq->pVnode), pReq->currentOffset, fetchOffset); + int32_t code = taosHashRemove(pTq->execs, pReq->subKey, strlen(pReq->subKey)); + ASSERT(code == 0); - SMqPollRspV2 rspV2 = {0}; - rspV2.dataLen = 0; + TXN txn; - STqConsumer* pConsumer = tqHandleGet(pTq->tqMeta, consumerId); - if (pConsumer == NULL) { - vWarn("tmq poll: consumer %ld (epoch %d) not found in vg %d", consumerId, pReq->epoch, TD_VID(pTq->pVnode)); - pMsg->pCont = NULL; - pMsg->contLen = 0; - pMsg->code = -1; - tmsgSendRsp(pMsg); - return 0; + if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) { + ASSERT(0); } - int32_t consumerEpoch = atomic_load_32(&pConsumer->epoch); - while (consumerEpoch < reqEpoch) { - consumerEpoch = atomic_val_compare_exchange_32(&pConsumer->epoch, consumerEpoch, reqEpoch); + if (tdbBegin(pTq->pMetaStore, &txn) < 0) { + ASSERT(0); } - STqTopic* pTopic = NULL; - int32_t topicSz = taosArrayGetSize(pConsumer->topics); - for (int32_t i = 0; i < topicSz; i++) { - STqTopic* topic = taosArrayGet(pConsumer->topics, i); - // TODO race condition - ASSERT(pConsumer->consumerId == consumerId); - if (strcmp(topic->topicName, pReq->topic) == 0) { - pTopic = topic; - break; - } - } - if (pTopic == NULL) { - vWarn("tmq poll: consumer %ld (epoch %d) topic %s not found in vg %d", consumerId, pReq->epoch, pReq->topic, - TD_VID(pTq->pVnode)); - pMsg->pCont = NULL; - pMsg->contLen = 0; - pMsg->code = -1; - tmsgSendRsp(pMsg); - return 0; + if (tdbTbDelete(pTq->pExecStore, pReq->subKey, (int)strlen(pReq->subKey), &txn) < 0) { + /*ASSERT(0);*/ } - tqDebug("poll topic %s from consumer %ld (epoch %d) vg %d", pTopic->topicName, consumerId, pReq->epoch, - TD_VID(pTq->pVnode)); - - rspV2.reqOffset = pReq->currentOffset; - rspV2.skipLogNum = 0; - - while (1) { - /*if (fetchOffset > walGetLastVer(pTq->pWal) || walReadWithHandle(pTopic->pReadhandle, fetchOffset) < 0) {*/ - // TODO - consumerEpoch = atomic_load_32(&pConsumer->epoch); - if (consumerEpoch > reqEpoch) { - tqDebug("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, found new consumer epoch %d discard req epoch %d", - consumerId, pReq->epoch, TD_VID(pTq->pVnode), fetchOffset, consumerEpoch, reqEpoch); - break; - } - SWalReadHead* pHead; - if (walReadWithHandle_s(pTopic->pReadhandle, fetchOffset, &pHead) < 0) { - // TODO: no more log, set timer to wait blocking time - // if data inserted during waiting, launch query and - // response to user - tqDebug("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, no more log to return", consumerId, pReq->epoch, - TD_VID(pTq->pVnode), fetchOffset); - break; - } - tqDebug("tmq poll: consumer %ld (epoch %d) iter log, vg %d offset %ld msgType %d", consumerId, pReq->epoch, - TD_VID(pTq->pVnode), fetchOffset, pHead->msgType); - /*int8_t pos = fetchOffset % TQ_BUFFER_SIZE;*/ - /*pHead = pTopic->pReadhandle->pHead;*/ - if (pHead->msgType == TDMT_VND_SUBMIT) { - SSubmitReq* pCont = (SSubmitReq*)&pHead->body; - qTaskInfo_t task = pTopic->buffer.output[workerId].task; - ASSERT(task); - qSetStreamInput(task, pCont, STREAM_DATA_TYPE_SUBMIT_BLOCK); - SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock)); - while (1) { - SSDataBlock* pDataBlock = NULL; - uint64_t ts; - if (qExecTask(task, &pDataBlock, &ts) < 0) { - ASSERT(false); - } - if (pDataBlock == NULL) { - /*pos = fetchOffset % TQ_BUFFER_SIZE;*/ - break; - } - - 
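/* [editor's note] The rewritten tqProcessVgDeleteReq, whose body begins here,
 * drops the in-memory exec first and then deletes the persisted copy inside a
 * TDB write transaction, so a crash between the two steps can at worst leave a
 * stale on-disk entry. The persistence sequence below repeats the calls from
 * the patch in a hypothetical wrapper; error handling is reduced to asserts
 * exactly as in the patch: */
static void deletePersistedExec(STQ* pTq, const char* subKey) {  // hypothetical helper
  TXN txn;
  if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL,
                 TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
    ASSERT(0);
  }
  if (tdbBegin(pTq->pMetaStore, &txn) < 0) {  // start the write transaction
    ASSERT(0);
  }
  if (tdbTbDelete(pTq->pExecStore, subKey, (int)strlen(subKey), &txn) < 0) {
    // tolerated: the key may never have been persisted
  }
  if (tdbCommit(pTq->pMetaStore, &txn) < 0) {  // durable once this returns
    ASSERT(0);
  }
}
/* [end editor's note] */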
taosArrayPush(pRes, pDataBlock); - } - - if (taosArrayGetSize(pRes) == 0) { - tqDebug("tmq poll: consumer %ld (epoch %d) iter log, vg %d skip log %ld since not wanted", consumerId, - pReq->epoch, TD_VID(pTq->pVnode), fetchOffset); - fetchOffset++; - rspV2.skipLogNum++; - taosArrayDestroy(pRes); - continue; - } - rspV2.rspOffset = fetchOffset; - - int32_t blockSz = taosArrayGetSize(pRes); - int32_t dataBlockStrLen = 0; - for (int32_t i = 0; i < blockSz; i++) { - SSDataBlock* pBlock = taosArrayGet(pRes, i); - dataBlockStrLen += sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock); - } - - void* dataBlockBuf = taosMemoryMalloc(dataBlockStrLen); - if (dataBlockBuf == NULL) { - pMsg->code = -1; - taosMemoryFree(pHead); - } - - rspV2.blockData = dataBlockBuf; - - int32_t pos; - rspV2.blockPos = taosArrayInit(blockSz, sizeof(int32_t)); - for (int32_t i = 0; i < blockSz; i++) { - pos = 0; - SSDataBlock* pBlock = taosArrayGet(pRes, i); - SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)dataBlockBuf; - pRetrieve->useconds = 0; - pRetrieve->precision = 0; - pRetrieve->compressed = 0; - pRetrieve->completed = 1; - pRetrieve->numOfRows = htonl(pBlock->info.rows); - blockCompressEncode(pBlock, pRetrieve->data, &pos, pBlock->info.numOfCols, false); - taosArrayPush(rspV2.blockPos, &rspV2.dataLen); - - int32_t totLen = sizeof(SRetrieveTableRsp) + pos; - pRetrieve->compLen = htonl(totLen); - rspV2.dataLen += totLen; - dataBlockBuf = POINTER_SHIFT(dataBlockBuf, totLen); - } - ASSERT(POINTER_DISTANCE(dataBlockBuf, rspV2.blockData) <= dataBlockStrLen); - - int32_t msgLen = sizeof(SMqRspHead) + tEncodeSMqPollRspV2(NULL, &rspV2); - void* buf = rpcMallocCont(msgLen); - - ((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__POLL_RSP; - ((SMqRspHead*)buf)->epoch = pReq->epoch; - ((SMqRspHead*)buf)->consumerId = consumerId; - - void* msgBodyBuf = POINTER_SHIFT(buf, sizeof(SMqRspHead)); - tEncodeSMqPollRspV2(&msgBodyBuf, &rspV2); - - /*rsp.pBlockData = pRes;*/ - - /*taosArrayDestroyEx(rsp.pBlockData, (void (*)(void*))tDeleteSSDataBlock);*/ - SRpcMsg resp = {.info = pMsg->info, pCont = buf, .contLen = msgLen, .code = 0}; - tqDebug("vg %d offset %ld msgType %d from consumer %ld (epoch %d) actual rsp", TD_VID(pTq->pVnode), fetchOffset, - pHead->msgType, consumerId, pReq->epoch); - tmsgSendRsp(&resp); - taosMemoryFree(pHead); - return 0; - } else { - taosMemoryFree(pHead); - fetchOffset++; - rspV2.skipLogNum++; - } - } - - /*if (blockingTime != 0) {*/ - /*tqAddClientPusher(pTq->tqPushMgr, pMsg, consumerId, blockingTime);*/ - /*} else {*/ - - rspV2.rspOffset = fetchOffset - 1; - - int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqPollRspV2(NULL, &rspV2); - void* buf = rpcMallocCont(tlen); - if (buf == NULL) { - pMsg->code = -1; - return -1; + if (tdbCommit(pTq->pMetaStore, &txn) < 0) { + ASSERT(0); } - ((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__POLL_RSP; - ((SMqRspHead*)buf)->epoch = pReq->epoch; - ((SMqRspHead*)buf)->consumerId = consumerId; - - void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead)); - tEncodeSMqPollRspV2(&abuf, &rspV2); - - SRpcMsg resp = {.info = pMsg->info, .pCont = buf, .contLen = tlen, .code = 0}; - tmsgSendRsp(&resp); - tqDebug("vg %d offset %ld from consumer %ld (epoch %d) not rsp", TD_VID(pTq->pVnode), fetchOffset, consumerId, - pReq->epoch); - /*}*/ return 0; } -#endif - -int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen) { - SMqVDeleteReq* pReq = (SMqVDeleteReq*)msg; - - int32_t code = taosHashRemove(pTq->execs, pReq->subKey, strlen(pReq->subKey)); - ASSERT(code == 0); - return 0; -} // 
TODO: persist meta into tdb int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) { @@ -944,22 +733,22 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) { pExec->pDropTbUid = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); } taosHashPut(pTq->execs, req.subKey, strlen(req.subKey), pExec, sizeof(STqExec)); + + if (tqStoreExec(pTq, req.subKey, pExec) < 0) { + // TODO + } return 0; } else { - /*if (req.newConsumerId != -1) {*/ - /*taosWLockLatch(&pExec->lock);*/ - ASSERT(pExec->consumerId == req.oldConsumerId); + /*ASSERT(pExec->consumerId == req.oldConsumerId);*/ // TODO handle qmsg and exec modification atomic_store_32(&pExec->epoch, -1); atomic_store_64(&pExec->consumerId, req.newConsumerId); atomic_add_fetch_32(&pExec->epoch, 1); - /*taosWUnLockLatch(&pExec->lock);*/ + + if (tqStoreExec(pTq, req.subKey, pExec) < 0) { + // TODO + } return 0; - /*} else {*/ - // TODO - /*taosHashRemove(pTq->tqMetaNew, req.subKey, strlen(req.subKey));*/ - /*return 0;*/ - /*}*/ } } @@ -968,7 +757,8 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) { SVnode* pVnode = (SVnode*)vnode; ASSERT(pTask->tbSink.pTSchema); - SSubmitReq* pReq = tdBlockToSubmit(pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid, pVnode->config.vgId); + SSubmitReq* pReq = tdBlockToSubmit(pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid, + pTask->tbSink.stbFullName, pVnode->config.vgId); /*tPrintFixedSchemaSubmitReq(pReq, pTask->tbSink.pTSchema);*/ // build write msg SRpcMsg msg = { @@ -980,7 +770,18 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) { ASSERT(tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) == 0); } -int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int32_t parallel) { +int32_t tqProcessTaskDeploy(STQ* pTq, char* msg, int32_t msgLen) { + SStreamTask* pTask = taosMemoryCalloc(1, sizeof(SStreamTask)); + if (pTask == NULL) { + return -1; + } + SDecoder decoder; + tDecoderInit(&decoder, (uint8_t*)msg, msgLen); + if (tDecodeSStreamTask(&decoder, pTask) < 0) { + ASSERT(0); + } + tDecoderClear(&decoder); + pTask->status = TASK_STATUS__IDLE; pTask->inputStatus = TASK_INPUT_STATUS__NORMAL; pTask->outputStatus = TASK_OUTPUT_STATUS__NORMAL; @@ -993,57 +794,19 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int32_t parallel) { if (pTask->inputQ == NULL || pTask->outputQ == NULL || pTask->inputQAll == NULL || pTask->outputQAll == NULL) goto FAIL; + // exec if (pTask->execType != TASK_EXEC__NONE) { // expand runners - pTask->exec.numOfRunners = parallel; - pTask->exec.runners = taosMemoryCalloc(parallel, sizeof(SStreamRunner)); - if (pTask->exec.runners == NULL) { - goto FAIL; - } - for (int32_t i = 0; i < parallel; i++) { - STqReadHandle* pStreamReader = tqInitSubmitMsgScanner(pTq->pVnode->pMeta); - SReadHandle handle = { - .reader = pStreamReader, - .meta = pTq->pVnode->pMeta, - .pMsgCb = &pTq->pVnode->msgCb, - .vnode = pTq->pVnode, - }; - pTask->exec.runners[i].inputHandle = pStreamReader; - pTask->exec.runners[i].executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle); - ASSERT(pTask->exec.runners[i].executor); - } - } - - if (pTask->sinkType == TASK_SINK__TABLE) { - pTask->tbSink.vnode = pTq->pVnode; - pTask->tbSink.tbSinkFunc = tqTableSink; - } - - return 0; -FAIL: - if (pTask->inputQ) taosCloseQueue(pTask->inputQ); - if (pTask->outputQ) taosCloseQueue(pTask->outputQ); - if (pTask->inputQAll) taosFreeQall(pTask->inputQAll); - if (pTask->outputQAll) 
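/* [editor's note] tqTableSink above is the stream-to-table sink: the task's
 * result array is converted into an ordinary SSubmitReq and pushed onto the
 * vnode's own WRITE_QUEUE, so stream output travels the same write path (WAL
 * included) as a client insert. Its core, condensed from the patch (the
 * SRpcMsg initializer fields other than pCont are elided in the hunk as shown
 * here, so only pCont is filled in below): */
SSubmitReq* pReq = tdBlockToSubmit(pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid,
                                   pTask->tbSink.stbFullName, pVnode->config.vgId);
SRpcMsg msg = {.pCont = pReq};  // remaining fields as in the original source
ASSERT(tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) == 0);
/* [end editor's note] */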
taosFreeQall(pTask->outputQAll); - if (pTask) taosMemoryFree(pTask); - return -1; -} - -int32_t tqProcessTaskDeploy(STQ* pTq, char* msg, int32_t msgLen) { - SStreamTask* pTask = taosMemoryCalloc(1, sizeof(SStreamTask)); - if (pTask == NULL) { - return -1; - } - SDecoder decoder; - tDecoderInit(&decoder, (uint8_t*)msg, msgLen); - if (tDecodeSStreamTask(&decoder, pTask) < 0) { - ASSERT(0); - } - tDecoderClear(&decoder); - - // exec - if (tqExpandTask(pTq, pTask, 4) < 0) { - ASSERT(0); + STqReadHandle* pStreamReader = tqInitSubmitMsgScanner(pTq->pVnode->pMeta); + SReadHandle handle = { + .reader = pStreamReader, + .meta = pTq->pVnode->pMeta, + .pMsgCb = &pTq->pVnode->msgCb, + .vnode = pTq->pVnode, + }; + pTask->exec.inputHandle = pStreamReader; + pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle); + ASSERT(pTask->exec.executor); } // sink @@ -1051,8 +814,12 @@ int32_t tqProcessTaskDeploy(STQ* pTq, char* msg, int32_t msgLen) { if (pTask->sinkType == TASK_SINK__SMA) { pTask->smaSink.smaSink = smaHandleRes; } else if (pTask->sinkType == TASK_SINK__TABLE) { + pTask->tbSink.vnode = pTq->pVnode; + pTask->tbSink.tbSinkFunc = tqTableSink; + ASSERT(pTask->tbSink.pSchemaWrapper); ASSERT(pTask->tbSink.pSchemaWrapper->pSchema); + pTask->tbSink.pTSchema = tdGetSTSChemaFromSSChema(&pTask->tbSink.pSchemaWrapper->pSchema, pTask->tbSink.pSchemaWrapper->nCols); ASSERT(pTask->tbSink.pTSchema); @@ -1060,115 +827,26 @@ int32_t tqProcessTaskDeploy(STQ* pTq, char* msg, int32_t msgLen) { taosHashPut(pTq->pStreamTasks, &pTask->taskId, sizeof(int32_t), pTask, sizeof(SStreamTask)); - return 0; -} - -int32_t tqProcessStreamTrigger(STQ* pTq, void* data, int32_t dataLen, int32_t workerId) { - void* pIter = NULL; - - while (1) { - pIter = taosHashIterate(pTq->pStreamTasks, pIter); - if (pIter == NULL) break; - SStreamTask* pTask = (SStreamTask*)pIter; - - if (streamExecTask(pTask, &pTq->pVnode->msgCb, data, STREAM_DATA_TYPE_SUBMIT_BLOCK, workerId) < 0) { - // TODO - } - } - return 0; -} - -#if 0 -int32_t tqProcessStreamTriggerNew(STQ* pTq, SSubmitReq* data) { - SStreamDataSubmit* pSubmit = NULL; - - // build data - pSubmit = taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM); - if (pSubmit == NULL) return -1; - pSubmit->dataRef = taosMemoryMalloc(sizeof(int32_t)); - if (pSubmit->dataRef == NULL) goto FAIL; - *pSubmit->dataRef = 1; - pSubmit->data = data; - pSubmit->type = STREAM_INPUT__DATA_BLOCK; - - void* pIter = NULL; - while (1) { - pIter = taosHashIterate(pTq->pStreamTasks, pIter); - if (pIter == NULL) break; - SStreamTask* pTask = (SStreamTask*)pIter; - if (pTask->inputType == TASK_INPUT_TYPE__SUMBIT_BLOCK) { - streamEnqueueDataSubmit(pTask, pSubmit); - // TODO cal back pressure - } - // check run - int8_t execStatus = atomic_load_8(&pTask->status); - if (execStatus == TASK_STATUS__IDLE || execStatus == TASK_STATUS__CLOSING) { - SStreamTaskRunReq* pReq = taosMemoryMalloc(sizeof(SStreamTaskRunReq)); - if (pReq == NULL) continue; - // TODO: do we need htonl? 
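/* [editor's note] Task deployment now binds each stream task to a single
 * executor instead of a pool of runners: a submit-message scanner is wrapped in
 * an SReadHandle that carries the vnode's meta store and message callback, and
 * the serialized plan in pTask->exec.qmsg is instantiated against it. The
 * sequence, as it appears in the patch: */
STqReadHandle* pStreamReader = tqInitSubmitMsgScanner(pTq->pVnode->pMeta);
SReadHandle handle = {
    .reader = pStreamReader,        // feeds submit blocks to the executor
    .meta = pTq->pVnode->pMeta,     // schema lookups during execution
    .pMsgCb = &pTq->pVnode->msgCb,  // lets the executor dispatch messages
    .vnode = pTq->pVnode,
};
pTask->exec.inputHandle = pStreamReader;
pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle);
/* [end editor's note] */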
- pReq->head.vgId = pTq->pVnode->config.vgId; - pReq->streamId = pTask->streamId; - pReq->taskId = pTask->taskId; - SRpcMsg msg = { - .msgType = 0, - .pCont = pReq, - .contLen = sizeof(SStreamTaskRunReq), - }; - tmsgPutToQueue(&pTq->pVnode->msgCb, FETCH_QUEUE, &msg); - } - } - streamDataSubmitRefDec(pSubmit); - return 0; FAIL: - if (pSubmit) { - if (pSubmit->dataRef) { - taosMemoryFree(pSubmit->dataRef); - } - taosFreeQitem(pSubmit); - } + if (pTask->inputQ) taosCloseQueue(pTask->inputQ); + if (pTask->outputQ) taosCloseQueue(pTask->outputQ); + if (pTask->inputQAll) taosFreeQall(pTask->inputQAll); + if (pTask->outputQAll) taosFreeQall(pTask->outputQAll); + if (pTask) taosMemoryFree(pTask); return -1; } -#endif - -int32_t tqProcessTaskExec(STQ* pTq, char* msg, int32_t msgLen, int32_t workerId) { - SStreamTaskExecReq req; - tDecodeSStreamTaskExecReq(msg, &req); - int32_t taskId = req.taskId; - ASSERT(taskId); - - SStreamTask* pTask = taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t)); - ASSERT(pTask); - - if (streamExecTask(pTask, &pTq->pVnode->msgCb, req.data, STREAM_DATA_TYPE_SSDATA_BLOCK, workerId) < 0) { - // TODO - } - return 0; -} - -int32_t tqProcessStreamTriggerNew(STQ* pTq, SSubmitReq* pReq) { - void* pIter = NULL; - bool failed = false; +int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* pReq) { + void* pIter = NULL; + bool failed = false; + SStreamDataSubmit* pSubmit = NULL; - SStreamDataSubmit* pSubmit = taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM); + pSubmit = streamDataSubmitNew(pReq); if (pSubmit == NULL) { failed = true; - goto SET_TASK_FAIL; } - pSubmit->dataRef = taosMemoryMalloc(sizeof(int32_t)); - if (pSubmit->dataRef == NULL) { - failed = true; - goto SET_TASK_FAIL; - } - - pSubmit->type = STREAM_INPUT__DATA_SUBMIT; - /*pSubmit->sourceVer = ver;*/ - /*pSubmit->sourceVg = pTq->pVnode->config.vgId;*/ - pSubmit->data = pReq; - *pSubmit->dataRef = 1; -SET_TASK_FAIL: while (1) { pIter = taosHashIterate(pTq->pStreamTasks, pIter); if (pIter == NULL) break; @@ -1183,7 +861,9 @@ SET_TASK_FAIL: } streamDataSubmitRefInc(pSubmit); - taosWriteQitem(pTask->inputQ, pSubmit); + SStreamDataSubmit* pSubmitClone = taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM); + memcpy(pSubmitClone, pSubmit, sizeof(SStreamDataSubmit)); + taosWriteQitem(pTask->inputQ, pSubmitClone); int8_t execStatus = atomic_load_8(&pTask->status); if (execStatus == TASK_STATUS__IDLE || execStatus == TASK_STATUS__CLOSING) { @@ -1206,18 +886,12 @@ SET_TASK_FAIL: } } - if (!failed) { + if (pSubmit) { streamDataSubmitRefDec(pSubmit); - return 0; - } else { - if (pSubmit) { - if (pSubmit->dataRef) { - taosMemoryFree(pSubmit->dataRef); - } - taosFreeQitem(pSubmit); - } - return -1; + taosFreeQitem(pSubmit); } + + return failed ? 
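/* [editor's note] tqProcessStreamTrigger above shares one ref-counted
 * SStreamDataSubmit across every interested task queue: the count is bumped
 * once per enqueued clone and the creator's own reference is released at the
 * end, so the underlying SSubmitReq is freed exactly when the last task
 * finishes with it. A minimal single-threaded illustration; the real
 * streamDataSubmitRefInc and streamDataSubmitRefDec presumably use atomics,
 * since tasks run concurrently (an assumption on my part): */
typedef struct {
  int32_t* dataRef;  // shared counter, one per underlying submit message
  void*    data;     // the submit payload
} SRefSubmit;        // hypothetical stand-in for SStreamDataSubmit

static void refInc(SRefSubmit* p) { (*p->dataRef)++; }

static void refDec(SRefSubmit* p) {
  if (--(*p->dataRef) == 0) {  // last owner frees payload and counter
    taosMemoryFree(p->data);
    taosMemoryFree(p->dataRef);
  }
}
/* [end editor's note] */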
-1 : 0; } int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) { @@ -1233,7 +907,7 @@ int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg) { SStreamDispatchReq* pReq = pMsg->pCont; int32_t taskId = pReq->taskId; SStreamTask* pTask = taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t)); - streamTaskProcessDispatchReq(pTask, &pTq->pVnode->msgCb, pReq, pMsg); + streamProcessDispatchReq(pTask, &pTq->pVnode->msgCb, pReq, pMsg); return 0; } @@ -1241,7 +915,7 @@ int32_t tqProcessTaskRecoverReq(STQ* pTq, SRpcMsg* pMsg) { SStreamTaskRecoverReq* pReq = pMsg->pCont; int32_t taskId = pReq->taskId; SStreamTask* pTask = taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t)); - streamTaskProcessRecoverReq(pTask, &pTq->pVnode->msgCb, pReq, pMsg); + streamProcessRecoverReq(pTask, &pTq->pVnode->msgCb, pReq, pMsg); return 0; } @@ -1249,7 +923,7 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) { SStreamDispatchRsp* pRsp = pMsg->pCont; int32_t taskId = pRsp->taskId; SStreamTask* pTask = taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t)); - streamTaskProcessDispatchRsp(pTask, &pTq->pVnode->msgCb, pRsp); + streamProcessDispatchRsp(pTask, &pTq->pVnode->msgCb, pRsp); return 0; } @@ -1257,6 +931,6 @@ int32_t tqProcessTaskRecoverRsp(STQ* pTq, SRpcMsg* pMsg) { SStreamTaskRecoverRsp* pRsp = pMsg->pCont; int32_t taskId = pRsp->taskId; SStreamTask* pTask = taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t)); - streamTaskProcessRecoverRsp(pTask, pRsp); + streamProcessRecoverRsp(pTask, pRsp); return 0; } diff --git a/source/dnode/vnode/src/tq/tqMetaStore.c b/source/dnode/vnode/src/tq/tqMetaStore.c deleted file mode 100644 index ca09cc1dc13094cf8ba53e685f8c973a1c0250b7..0000000000000000000000000000000000000000 --- a/source/dnode/vnode/src/tq/tqMetaStore.c +++ /dev/null @@ -1,622 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */ -#include "tq.h" -// #include -// #include -// #include -// #include "osDir.h" - -#define TQ_META_NAME "tq.meta" -#define TQ_IDX_NAME "tq.idx" - -static int32_t tqHandlePutCommitted(STqMetaStore*, int64_t key, void* value); -static void* tqHandleGetUncommitted(STqMetaStore*, int64_t key); - -static inline void tqLinkUnpersist(STqMetaStore* pMeta, STqMetaList* pNode) { - if (pNode->unpersistNext == NULL) { - pNode->unpersistNext = pMeta->unpersistHead->unpersistNext; - pNode->unpersistPrev = pMeta->unpersistHead; - pMeta->unpersistHead->unpersistNext->unpersistPrev = pNode; - pMeta->unpersistHead->unpersistNext = pNode; - } -} - -static inline int64_t tqSeekLastPage(TdFilePtr pFile) { - int offset = taosLSeekFile(pFile, 0, SEEK_END); - int pageNo = offset / TQ_PAGE_SIZE; - int curPageOffset = pageNo * TQ_PAGE_SIZE; - return taosLSeekFile(pFile, curPageOffset, SEEK_SET); -} - -// TODO: the struct is tightly coupled with index entry -typedef struct STqIdxPageHead { - int16_t writeOffset; - int8_t unused[14]; -} STqIdxPageHead; - -typedef struct STqIdxPageBuf { - STqIdxPageHead head; - char buffer[TQ_IDX_PAGE_BODY_SIZE]; -} STqIdxPageBuf; - -static inline int tqReadLastPage(TdFilePtr pFile, STqIdxPageBuf* pBuf) { - int offset = tqSeekLastPage(pFile); - int nBytes; - if ((nBytes = taosReadFile(pFile, pBuf, TQ_PAGE_SIZE)) == -1) { - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - if (nBytes == 0) { - memset(pBuf, 0, TQ_PAGE_SIZE); - pBuf->head.writeOffset = TQ_IDX_PAGE_HEAD_SIZE; - } - ASSERT(nBytes == 0 || nBytes == pBuf->head.writeOffset); - - return taosLSeekFile(pFile, offset, SEEK_SET); -} - -STqMetaStore* tqStoreOpen(STQ* pTq, const char* path, FTqSerialize serializer, FTqDeserialize deserializer, - FTqDelete deleter, int32_t tqConfigFlag) { - STqMetaStore* pMeta = taosMemoryCalloc(1, sizeof(STqMetaStore)); - if (pMeta == NULL) { - terrno = TSDB_CODE_TQ_OUT_OF_MEMORY; - return NULL; - } - pMeta->pTq = pTq; - - // concat data file name and index file name - size_t pathLen = strlen(path); - pMeta->dirPath = taosMemoryMalloc(pathLen + 1); - if (pMeta->dirPath == NULL) { - terrno = TSDB_CODE_TQ_OUT_OF_MEMORY; - taosMemoryFree(pMeta); - return NULL; - } - strcpy(pMeta->dirPath, path); - - char* name = taosMemoryMalloc(pathLen + 10); - - strcpy(name, path); - if (!taosDirExist(name) && taosMkDir(name) != 0) { - terrno = TSDB_CODE_TQ_FAILED_TO_CREATE_DIR; - tqError("failed to create dir:%s since %s ", name, terrstr()); - } - strcat(name, "/" TQ_IDX_NAME); - TdFilePtr pIdxFile = taosOpenFile(name, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_READ); - if (pIdxFile == NULL) { - terrno = TAOS_SYSTEM_ERROR(errno); - tqError("failed to open file:%s since %s ", name, terrstr()); - // free memory - taosMemoryFree(name); - return NULL; - } - - pMeta->pIdxFile = pIdxFile; - pMeta->unpersistHead = taosMemoryCalloc(1, sizeof(STqMetaList)); - if (pMeta->unpersistHead == NULL) { - terrno = TSDB_CODE_TQ_OUT_OF_MEMORY; - taosMemoryFree(name); - return NULL; - } - pMeta->unpersistHead->unpersistNext = pMeta->unpersistHead->unpersistPrev = pMeta->unpersistHead; - - strcpy(name, path); - strcat(name, "/" TQ_META_NAME); - TdFilePtr pFile = taosOpenFile(name, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_READ); - if (pFile == NULL) { - terrno = TAOS_SYSTEM_ERROR(errno); - tqError("failed to open file:%s since %s", name, terrstr()); - taosMemoryFree(name); - return NULL; - } - taosMemoryFree(name); - - pMeta->pFile = pFile; - - pMeta->pSerializer = serializer; - pMeta->pDeserializer = deserializer; - pMeta->pDeleter 
= deleter; - pMeta->tqConfigFlag = tqConfigFlag; - - // read idx file and load into memory - STqIdxPageBuf idxBuf; - STqSerializedHead* serializedObj = taosMemoryMalloc(TQ_PAGE_SIZE); - if (serializedObj == NULL) { - terrno = TSDB_CODE_TQ_OUT_OF_MEMORY; - } - int idxRead; - int allocated = TQ_PAGE_SIZE; - bool readEnd = false; - while ((idxRead = taosReadFile(pIdxFile, &idxBuf, TQ_PAGE_SIZE))) { - if (idxRead == -1) { - // TODO: handle error - terrno = TAOS_SYSTEM_ERROR(errno); - tqError("failed to read tq index file since %s", terrstr()); - } - ASSERT(idxBuf.head.writeOffset == idxRead); - // loop read every entry - for (int i = 0; i < idxBuf.head.writeOffset - TQ_IDX_PAGE_HEAD_SIZE; i += TQ_IDX_SIZE) { - STqMetaList* pNode = taosMemoryCalloc(1, sizeof(STqMetaList)); - if (pNode == NULL) { - terrno = TSDB_CODE_TQ_OUT_OF_MEMORY; - // TODO: free memory - } - memcpy(&pNode->handle, &idxBuf.buffer[i], TQ_IDX_SIZE); - - taosLSeekFile(pFile, pNode->handle.offset, SEEK_SET); - if (allocated < pNode->handle.serializedSize) { - void* ptr = taosMemoryRealloc(serializedObj, pNode->handle.serializedSize); - if (ptr == NULL) { - terrno = TSDB_CODE_TQ_OUT_OF_MEMORY; - // TODO: free memory - } - serializedObj = ptr; - allocated = pNode->handle.serializedSize; - } - serializedObj->ssize = pNode->handle.serializedSize; - if (taosReadFile(pFile, serializedObj, pNode->handle.serializedSize) != pNode->handle.serializedSize) { - // TODO: read error - } - if (serializedObj->action == TQ_ACTION_INUSE) { - if (serializedObj->ssize != sizeof(STqSerializedHead)) { - pMeta->pDeserializer(pTq, serializedObj, &pNode->handle.valueInUse); - } else { - pNode->handle.valueInUse = TQ_DELETE_TOKEN; - } - } else if (serializedObj->action == TQ_ACTION_INTXN) { - if (serializedObj->ssize != sizeof(STqSerializedHead)) { - pMeta->pDeserializer(pTq, serializedObj, &pNode->handle.valueInTxn); - } else { - pNode->handle.valueInTxn = TQ_DELETE_TOKEN; - } - } else if (serializedObj->action == TQ_ACTION_INUSE_CONT) { - if (serializedObj->ssize != sizeof(STqSerializedHead)) { - pMeta->pDeserializer(pTq, serializedObj, &pNode->handle.valueInUse); - } else { - pNode->handle.valueInUse = TQ_DELETE_TOKEN; - } - STqSerializedHead* ptr = POINTER_SHIFT(serializedObj, serializedObj->ssize); - if (ptr->ssize != sizeof(STqSerializedHead)) { - pMeta->pDeserializer(pTq, ptr, &pNode->handle.valueInTxn); - } else { - pNode->handle.valueInTxn = TQ_DELETE_TOKEN; - } - } else { - ASSERT(0); - } - - // put into list - int bucketKey = pNode->handle.key & TQ_BUCKET_MASK; - STqMetaList* pBucketNode = pMeta->bucket[bucketKey]; - if (pBucketNode == NULL) { - pMeta->bucket[bucketKey] = pNode; - } else if (pBucketNode->handle.key == pNode->handle.key) { - pNode->next = pBucketNode->next; - pMeta->bucket[bucketKey] = pNode; - } else { - while (pBucketNode->next && pBucketNode->next->handle.key != pNode->handle.key) { - pBucketNode = pBucketNode->next; - } - if (pBucketNode->next) { - ASSERT(pBucketNode->next->handle.key == pNode->handle.key); - STqMetaList* pNodeFound = pBucketNode->next; - pNode->next = pNodeFound->next; - pBucketNode->next = pNode; - pBucketNode = pNodeFound; - } else { - pNode->next = pMeta->bucket[bucketKey]; - pMeta->bucket[bucketKey] = pNode; - pBucketNode = NULL; - } - } - if (pBucketNode) { - if (pBucketNode->handle.valueInUse && pBucketNode->handle.valueInUse != TQ_DELETE_TOKEN) { - pMeta->pDeleter(pBucketNode->handle.valueInUse); - } - if (pBucketNode->handle.valueInTxn && pBucketNode->handle.valueInTxn != TQ_DELETE_TOKEN) { - 
pMeta->pDeleter(pBucketNode->handle.valueInTxn); - } - taosMemoryFree(pBucketNode); - } - } - } - taosMemoryFree(serializedObj); - return pMeta; -} - -int32_t tqStoreClose(STqMetaStore* pMeta) { - // commit data and idx - tqStorePersist(pMeta); - ASSERT(pMeta->unpersistHead && pMeta->unpersistHead->next == NULL); - taosCloseFile(&pMeta->pFile); - taosCloseFile(&pMeta->pIdxFile); - // free memory - for (int i = 0; i < TQ_BUCKET_SIZE; i++) { - STqMetaList* pNode = pMeta->bucket[i]; - while (pNode) { - ASSERT(pNode->unpersistNext == NULL); - ASSERT(pNode->unpersistPrev == NULL); - if (pNode->handle.valueInTxn && pNode->handle.valueInTxn != TQ_DELETE_TOKEN) { - pMeta->pDeleter(pNode->handle.valueInTxn); - } - if (pNode->handle.valueInUse && pNode->handle.valueInUse != TQ_DELETE_TOKEN) { - pMeta->pDeleter(pNode->handle.valueInUse); - } - STqMetaList* next = pNode->next; - taosMemoryFree(pNode); - pNode = next; - } - } - taosMemoryFree(pMeta->dirPath); - taosMemoryFree(pMeta->unpersistHead); - taosMemoryFree(pMeta); - return 0; -} - -int32_t tqStoreDelete(STqMetaStore* pMeta) { - taosCloseFile(&pMeta->pFile); - taosCloseFile(&pMeta->pIdxFile); - // free memory - for (int i = 0; i < TQ_BUCKET_SIZE; i++) { - STqMetaList* pNode = pMeta->bucket[i]; - pMeta->bucket[i] = NULL; - while (pNode) { - if (pNode->handle.valueInTxn && pNode->handle.valueInTxn != TQ_DELETE_TOKEN) { - pMeta->pDeleter(pNode->handle.valueInTxn); - } - if (pNode->handle.valueInUse && pNode->handle.valueInUse != TQ_DELETE_TOKEN) { - pMeta->pDeleter(pNode->handle.valueInUse); - } - STqMetaList* next = pNode->next; - taosMemoryFree(pNode); - pNode = next; - } - } - taosMemoryFree(pMeta->unpersistHead); - taosRemoveDir(pMeta->dirPath); - taosMemoryFree(pMeta->dirPath); - taosMemoryFree(pMeta); - return 0; -} - -int32_t tqStorePersist(STqMetaStore* pMeta) { - STqIdxPageBuf idxBuf; - int64_t* bufPtr = (int64_t*)idxBuf.buffer; - STqMetaList* pHead = pMeta->unpersistHead; - STqMetaList* pNode = pHead->unpersistNext; - STqSerializedHead* pSHead = taosMemoryMalloc(sizeof(STqSerializedHead)); - if (pSHead == NULL) { - terrno = TSDB_CODE_TQ_OUT_OF_MEMORY; - return -1; - } - pSHead->ver = TQ_SVER; - pSHead->checksum = 0; - pSHead->ssize = sizeof(STqSerializedHead); - /*int allocatedSize = sizeof(STqSerializedHead);*/ - int offset = taosLSeekFile(pMeta->pFile, 0, SEEK_CUR); - - tqReadLastPage(pMeta->pIdxFile, &idxBuf); - - if (idxBuf.head.writeOffset == TQ_PAGE_SIZE) { - taosLSeekFile(pMeta->pIdxFile, 0, SEEK_END); - memset(&idxBuf, 0, TQ_PAGE_SIZE); - idxBuf.head.writeOffset = TQ_IDX_PAGE_HEAD_SIZE; - } else { - bufPtr = POINTER_SHIFT(&idxBuf, idxBuf.head.writeOffset); - } - - while (pHead != pNode) { - int nBytes = 0; - - if (pNode->handle.valueInUse) { - if (pNode->handle.valueInTxn) { - pSHead->action = TQ_ACTION_INUSE_CONT; - } else { - pSHead->action = TQ_ACTION_INUSE; - } - - if (pNode->handle.valueInUse == TQ_DELETE_TOKEN) { - pSHead->ssize = sizeof(STqSerializedHead); - } else { - pMeta->pSerializer(pNode->handle.valueInUse, &pSHead); - } - nBytes = taosWriteFile(pMeta->pFile, pSHead, pSHead->ssize); - ASSERT(nBytes == pSHead->ssize); - } - - if (pNode->handle.valueInTxn) { - pSHead->action = TQ_ACTION_INTXN; - if (pNode->handle.valueInTxn == TQ_DELETE_TOKEN) { - pSHead->ssize = sizeof(STqSerializedHead); - } else { - pMeta->pSerializer(pNode->handle.valueInTxn, &pSHead); - } - int nBytesTxn = taosWriteFile(pMeta->pFile, pSHead, pSHead->ssize); - ASSERT(nBytesTxn == pSHead->ssize); - nBytes += nBytesTxn; - } - pNode->handle.offset = 
offset; - offset += nBytes; - - // write idx file - // TODO: endian check and convert - *(bufPtr++) = pNode->handle.key; - *(bufPtr++) = pNode->handle.offset; - *(bufPtr++) = (int64_t)nBytes; - idxBuf.head.writeOffset += TQ_IDX_SIZE; - - if (idxBuf.head.writeOffset >= TQ_PAGE_SIZE) { - nBytes = taosWriteFile(pMeta->pIdxFile, &idxBuf, TQ_PAGE_SIZE); - // TODO: handle error with tfile - ASSERT(nBytes == TQ_PAGE_SIZE); - memset(&idxBuf, 0, TQ_PAGE_SIZE); - idxBuf.head.writeOffset = TQ_IDX_PAGE_HEAD_SIZE; - bufPtr = (int64_t*)&idxBuf.buffer; - } - // remove from unpersist list - pHead->unpersistNext = pNode->unpersistNext; - pHead->unpersistNext->unpersistPrev = pHead; - pNode->unpersistPrev = pNode->unpersistNext = NULL; - pNode = pHead->unpersistNext; - - // remove from bucket - if (pNode->handle.valueInUse == TQ_DELETE_TOKEN && pNode->handle.valueInTxn == NULL) { - int bucketKey = pNode->handle.key & TQ_BUCKET_MASK; - STqMetaList* pBucketHead = pMeta->bucket[bucketKey]; - if (pBucketHead == pNode) { - pMeta->bucket[bucketKey] = pNode->next; - } else { - STqMetaList* pBucketNode = pBucketHead; - while (pBucketNode->next != NULL && pBucketNode->next != pNode) { - pBucketNode = pBucketNode->next; - } - // impossible for pBucket->next == NULL - ASSERT(pBucketNode->next == pNode); - pBucketNode->next = pNode->next; - } - taosMemoryFree(pNode); - } - } - - // write left bytes - taosMemoryFree(pSHead); - // TODO: write new version in tfile - if ((char*)bufPtr != idxBuf.buffer) { - int nBytes = taosWriteFile(pMeta->pIdxFile, &idxBuf, idxBuf.head.writeOffset); - // TODO: handle error in tfile - ASSERT(nBytes == idxBuf.head.writeOffset); - } - // TODO: using fsync in tfile - taosFsyncFile(pMeta->pIdxFile); - taosFsyncFile(pMeta->pFile); - return 0; -} - -static int32_t tqHandlePutCommitted(STqMetaStore* pMeta, int64_t key, void* value) { - int64_t bucketKey = key & TQ_BUCKET_MASK; - STqMetaList* pNode = pMeta->bucket[bucketKey]; - while (pNode) { - if (pNode->handle.key == key) { - if (pNode->handle.valueInUse && pNode->handle.valueInUse != TQ_DELETE_TOKEN) { - pMeta->pDeleter(pNode->handle.valueInUse); - } - // change pointer ownership - pNode->handle.valueInUse = value; - return 0; - } else { - pNode = pNode->next; - } - } - STqMetaList* pNewNode = taosMemoryCalloc(1, sizeof(STqMetaList)); - if (pNewNode == NULL) { - terrno = TSDB_CODE_TQ_OUT_OF_MEMORY; - return -1; - } - pNewNode->handle.key = key; - pNewNode->handle.valueInUse = value; - pNewNode->next = pMeta->bucket[bucketKey]; - // put into unpersist list - pNewNode->unpersistPrev = pMeta->unpersistHead; - pNewNode->unpersistNext = pMeta->unpersistHead->unpersistNext; - pMeta->unpersistHead->unpersistNext->unpersistPrev = pNewNode; - pMeta->unpersistHead->unpersistNext = pNewNode; - return 0; -} - -void* tqHandleGet(STqMetaStore* pMeta, int64_t key) { - int64_t bucketKey = key & TQ_BUCKET_MASK; - STqMetaList* pNode = pMeta->bucket[bucketKey]; - while (pNode) { - if (pNode->handle.key == key) { - if (pNode->handle.valueInUse != NULL && pNode->handle.valueInUse != TQ_DELETE_TOKEN) { - return pNode->handle.valueInUse; - } else { - return NULL; - } - } else { - pNode = pNode->next; - } - } - return NULL; -} - -void* tqHandleTouchGet(STqMetaStore* pMeta, int64_t key) { - int64_t bucketKey = key & TQ_BUCKET_MASK; - STqMetaList* pNode = pMeta->bucket[bucketKey]; - while (pNode) { - if (pNode->handle.key == key) { - if (pNode->handle.valueInUse != NULL && pNode->handle.valueInUse != TQ_DELETE_TOKEN) { - tqLinkUnpersist(pMeta, pNode); - return 
pNode->handle.valueInUse; - } else { - return NULL; - } - } else { - pNode = pNode->next; - } - } - return NULL; -} - -static inline int32_t tqHandlePutImpl(STqMetaStore* pMeta, int64_t key, void* value) { - int64_t bucketKey = key & TQ_BUCKET_MASK; - STqMetaList* pNode = pMeta->bucket[bucketKey]; - while (pNode) { - if (pNode->handle.key == key) { - if (pNode->handle.valueInTxn) { - if (tqDupIntxnReject(pMeta->tqConfigFlag)) { - terrno = TSDB_CODE_TQ_META_KEY_DUP_IN_TXN; - return -1; - } - if (pNode->handle.valueInTxn != TQ_DELETE_TOKEN) { - pMeta->pDeleter(pNode->handle.valueInTxn); - } - } - pNode->handle.valueInTxn = value; - tqLinkUnpersist(pMeta, pNode); - return 0; - } else { - pNode = pNode->next; - } - } - STqMetaList* pNewNode = taosMemoryCalloc(1, sizeof(STqMetaList)); - if (pNewNode == NULL) { - terrno = TSDB_CODE_TQ_OUT_OF_MEMORY; - return -1; - } - pNewNode->handle.key = key; - pNewNode->handle.valueInTxn = value; - pNewNode->next = pMeta->bucket[bucketKey]; - pMeta->bucket[bucketKey] = pNewNode; - tqLinkUnpersist(pMeta, pNewNode); - return 0; -} - -int32_t tqHandleMovePut(STqMetaStore* pMeta, int64_t key, void* value) { return tqHandlePutImpl(pMeta, key, value); } - -int32_t tqHandleCopyPut(STqMetaStore* pMeta, int64_t key, void* value, size_t vsize) { - void* vmem = taosMemoryMalloc(vsize); - if (vmem == NULL) { - terrno = TSDB_CODE_TQ_OUT_OF_MEMORY; - return -1; - } - memcpy(vmem, value, vsize); - return tqHandlePutImpl(pMeta, key, vmem); -} - -static void* tqHandleGetUncommitted(STqMetaStore* pMeta, int64_t key) { - int64_t bucketKey = key & TQ_BUCKET_MASK; - STqMetaList* pNode = pMeta->bucket[bucketKey]; - while (pNode) { - if (pNode->handle.key == key) { - if (pNode->handle.valueInTxn != NULL && pNode->handle.valueInTxn != TQ_DELETE_TOKEN) { - return pNode->handle.valueInTxn; - } else { - return NULL; - } - } else { - pNode = pNode->next; - } - } - return NULL; -} - -int32_t tqHandleCommit(STqMetaStore* pMeta, int64_t key) { - int64_t bucketKey = key & TQ_BUCKET_MASK; - STqMetaList* pNode = pMeta->bucket[bucketKey]; - while (pNode) { - if (pNode->handle.key == key) { - if (pNode->handle.valueInTxn == NULL) { - terrno = TSDB_CODE_TQ_META_KEY_NOT_IN_TXN; - return -1; - } - if (pNode->handle.valueInUse && pNode->handle.valueInUse != TQ_DELETE_TOKEN) { - pMeta->pDeleter(pNode->handle.valueInUse); - } - pNode->handle.valueInUse = pNode->handle.valueInTxn; - pNode->handle.valueInTxn = NULL; - tqLinkUnpersist(pMeta, pNode); - return 0; - } else { - pNode = pNode->next; - } - } - terrno = TSDB_CODE_TQ_META_NO_SUCH_KEY; - return -1; -} - -int32_t tqHandleAbort(STqMetaStore* pMeta, int64_t key) { - int64_t bucketKey = key & TQ_BUCKET_MASK; - STqMetaList* pNode = pMeta->bucket[bucketKey]; - while (pNode) { - if (pNode->handle.key == key) { - if (pNode->handle.valueInTxn) { - if (pNode->handle.valueInTxn != TQ_DELETE_TOKEN) { - pMeta->pDeleter(pNode->handle.valueInTxn); - } - pNode->handle.valueInTxn = NULL; - tqLinkUnpersist(pMeta, pNode); - return 0; - } - terrno = TSDB_CODE_TQ_META_KEY_NOT_IN_TXN; - return -1; - } else { - pNode = pNode->next; - } - } - terrno = TSDB_CODE_TQ_META_NO_SUCH_KEY; - return -1; -} - -int32_t tqHandleDel(STqMetaStore* pMeta, int64_t key) { - int64_t bucketKey = key & TQ_BUCKET_MASK; - STqMetaList* pNode = pMeta->bucket[bucketKey]; - while (pNode) { - if (pNode->handle.key == key) { - if (pNode->handle.valueInTxn != TQ_DELETE_TOKEN) { - if (pNode->handle.valueInTxn) { - pMeta->pDeleter(pNode->handle.valueInTxn); - } - - pNode->handle.valueInTxn = 
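/* [editor's note] The store being deleted in this hunk kept two value slots per
 * key, valueInUse (committed) and valueInTxn (pending), with TQ_DELETE_TOKEN as
 * a tombstone. In outline, per the functions above:
 *   commit(key): free old valueInUse; valueInUse = valueInTxn; valueInTxn = NULL
 *   abort(key):  free valueInTxn; valueInTxn = NULL
 *   delete(key): valueInTxn = TQ_DELETE_TOKEN (persisted later as a tombstone)
 * The TDB-backed replacement elsewhere in this patch delegates this bookkeeping
 * to tdbBegin/tdbCommit transactions instead. */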
TQ_DELETE_TOKEN; - tqLinkUnpersist(pMeta, pNode); - return 0; - } - } else { - pNode = pNode->next; - } - } - terrno = TSDB_CODE_TQ_META_NO_SUCH_KEY; - return -1; -} - -int32_t tqHandlePurge(STqMetaStore* pMeta, int64_t key) { - int64_t bucketKey = key & TQ_BUCKET_MASK; - STqMetaList* pNode = pMeta->bucket[bucketKey]; - while (pNode) { - if (pNode->handle.key == key) { - pNode->handle.valueInUse = TQ_DELETE_TOKEN; - tqLinkUnpersist(pMeta, pNode); - return 0; - } else { - pNode = pNode->next; - } - } - terrno = TSDB_CODE_TQ_META_NO_SUCH_KEY; - return -1; -} - -// TODO: clean deleted idx and data from persistent file -int32_t tqStoreCompact(STqMetaStore* pMeta) { return 0; } diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 918660a9ec06770a2a8f880ab21102fbb60e4897..9f4c5fc81e05f7a39cd76612af0809f42f01700e 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -83,20 +83,30 @@ bool tqNextDataBlockFilterOut(STqReadHandle* pHandle, SHashObj* filterOutUids) { int32_t tqRetrieveDataBlock(SArray** ppCols, STqReadHandle* pHandle, uint64_t* pGroupId, uint64_t* pUid, int32_t* pNumOfRows, int16_t* pNumOfCols) { - /*int32_t sversion = pHandle->pBlock->sversion;*/ - // TODO set to real sversion *pUid = 0; - int32_t sversion = 1; + // TODO set to real sversion + /*int32_t sversion = 1;*/ + int32_t sversion = htonl(pHandle->pBlock->sversion); if (pHandle->sver != sversion || pHandle->cachedSchemaUid != pHandle->msgIter.suid) { pHandle->pSchema = metaGetTbTSchema(pHandle->pVnodeMeta, pHandle->msgIter.uid, sversion); if (pHandle->pSchema == NULL) { - tqError("cannot found schema for table: %ld, version %d", pHandle->msgIter.suid, pHandle->sver); + tqWarn("cannot find tsschema for table: uid: %ld (suid: %ld), version %d, possibly dropped table", + pHandle->msgIter.uid, pHandle->msgIter.suid, pHandle->sver); + /*ASSERT(0);*/ + terrno = TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND; return -1; } // this interface use suid instead of uid pHandle->pSchemaWrapper = metaGetTableSchema(pHandle->pVnodeMeta, pHandle->msgIter.suid, sversion, true); + if (pHandle->pSchemaWrapper == NULL) { + tqWarn("cannot find schema wrapper for table: suid: %ld, version %d, possibly dropped table", + pHandle->msgIter.suid, pHandle->sver); + /*ASSERT(0);*/ + terrno = TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND; + return -1; + } pHandle->sver = sversion; pHandle->cachedSchemaUid = pHandle->msgIter.suid; } diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit.c b/source/dnode/vnode/src/tsdb/tsdbCommit.c index 28b0ae042d5a352d58559fa6e8108cdac7a02157..88d8ee9f9250f0139c19f3f9e2b0f8a553dc0520 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCommit.c +++ b/source/dnode/vnode/src/tsdb/tsdbCommit.c @@ -84,8 +84,8 @@ static int tsdbMergeBlockData(SCommitH *pCommith, SCommitIter *pIter, SDataCols static void tsdbResetCommitTable(SCommitH *pCommith); static void tsdbCloseCommitFile(SCommitH *pCommith, bool hasError); static bool tsdbCanAddSubBlock(SCommitH *pCommith, SBlock *pBlock, SMergeInfo *pInfo); -static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, SDataCols *pTarget, - TSKEY maxKey, int maxRows, int8_t update); +static void tsdbLoadAndMergeFromCache(STsdb *pTsdb, SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, + SDataCols *pTarget, TSKEY maxKey, int maxRows, int8_t update); int tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf); int tsdbApplyRtnOnFSet(STsdb *pRepo, SDFileSet *pSet, SRtn *pRtn) { @@ -301,7 +301,8 @@ 
static void tsdbSeekCommitIter(SCommitH *pCommith, TSKEY key) { SCommitIter *pIter = pCommith->iters + i; if (pIter->pTable == NULL || pIter->pIter == NULL) continue; - tsdbLoadDataFromCache(pIter->pTable, pIter->pIter, key - 1, INT32_MAX, NULL, NULL, 0, true, NULL); + tsdbLoadDataFromCache(TSDB_COMMIT_REPO(pCommith), pIter->pTable, pIter->pIter, key - 1, INT32_MAX, NULL, NULL, 0, + true, NULL); } } @@ -465,7 +466,7 @@ static int tsdbCreateCommitIters(SCommitH *pCommith) { pTbData = (STbData *)pNode->pData; pCommitIter = pCommith->iters + i; - pTSchema = metaGetTbTSchema(REPO_META(pRepo), pTbData->uid, 1); // TODO: schema version + pTSchema = metaGetTbTSchema(REPO_META(pRepo), pTbData->uid, -1); if (pTSchema) { pCommitIter->pIter = tSkipListCreateIter(pTbData->pData); @@ -474,7 +475,8 @@ static int tsdbCreateCommitIters(SCommitH *pCommith) { pCommitIter->pTable = (STable *)taosMemoryMalloc(sizeof(STable)); pCommitIter->pTable->uid = pTbData->uid; pCommitIter->pTable->tid = pTbData->uid; - pCommitIter->pTable->pSchema = pTSchema; // metaGetTbTSchema(REPO_META(pRepo), pTbData->uid, 0); + pCommitIter->pTable->pSchema = pTSchema; + pCommitIter->pTable->pCacheSchema = NULL; } } tSkipListDestroyIter(pSlIter); @@ -489,6 +491,7 @@ static void tsdbDestroyCommitIters(SCommitH *pCommith) { tSkipListDestroyIter(pCommith->iters[i].pIter); if (pCommith->iters[i].pTable) { tdFreeSchema(pCommith->iters[i].pTable->pSchema); + tdFreeSchema(pCommith->iters[i].pTable->pCacheSchema); taosMemoryFreeClear(pCommith->iters[i].pTable); } } @@ -913,7 +916,7 @@ static int tsdbMoveBlkIdx(SCommitH *pCommith, SBlockIdx *pIdx) { while (bidx < nBlocks) { if (!pTSchema && !tsdbCommitIsSameFile(pCommith, bidx)) { // Set commit table - pTSchema = metaGetTbTSchema(REPO_META(pTsdb), pIdx->uid, 1); // TODO: schema version + pTSchema = metaGetTbTSchema(REPO_META(pTsdb), pIdx->uid, -1); // TODO: schema version if (!pTSchema) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; @@ -947,7 +950,7 @@ static int tsdbMoveBlkIdx(SCommitH *pCommith, SBlockIdx *pIdx) { } static int tsdbSetCommitTable(SCommitH *pCommith, STable *pTable) { - STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); + STSchema *pSchema = tsdbGetTableSchemaImpl(TSDB_COMMIT_REPO(pCommith), pTable, false, false, -1); pCommith->pTable = pTable; @@ -1138,6 +1141,9 @@ int tsdbWriteBlockImpl(STsdb *pRepo, STable *pTable, SDFile *pDFile, SDFile *pDF memcpy(tptr, pDataCol->pData, flen); if (tBitmaps > 0) { bptr = POINTER_SHIFT(pBlockData, lsize + flen); + if (isSuper && !tdDataColsIsBitmapI(pDataCols)) { + tdMergeBitmap((uint8_t *)pDataCol->pBitmap, rowsToWrite, (uint8_t *)pDataCol->pBitmap); + } memcpy(bptr, pDataCol->pBitmap, tBitmaps); tBitmapsLen = tBitmaps; flen += tBitmapsLen; @@ -1251,8 +1257,8 @@ static int tsdbCommitMemData(SCommitH *pCommith, SCommitIter *pIter, TSKEY keyLi SBlock block; while (true) { - tsdbLoadDataFromCache(pIter->pTable, pIter->pIter, keyLimit, defaultRows, pCommith->pDataCols, NULL, 0, - pCfg->update, &mInfo); + tsdbLoadDataFromCache(TSDB_COMMIT_REPO(pCommith), pIter->pTable, pIter->pIter, keyLimit, defaultRows, + pCommith->pDataCols, NULL, 0, pCfg->update, &mInfo); if (pCommith->pDataCols->numOfRows <= 0) break; @@ -1295,8 +1301,9 @@ static int tsdbMergeMemData(SCommitH *pCommith, SCommitIter *pIter, int bidx) { SSkipListIterator titer = *(pIter->pIter); if (tsdbLoadBlockDataCols(&(pCommith->readh), pBlock, NULL, &colId, 1, false) < 0) return -1; - tsdbLoadDataFromCache(pIter->pTable, &titer, keyLimit, INT32_MAX, NULL, 
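/* [editor's note] Throughout this commit path the repo handle is now threaded
 * into every cache-load and merge helper so schema lookups can go through
 * tsdbGetTableSchemaImpl with the repo in hand, and metaGetTbTSchema is called
 * with version -1 rather than a hard-coded 1. My reading (not a documented
 * contract) is that -1 requests the latest schema version:
 *   before: metaGetTbTSchema(REPO_META(pRepo), pTbData->uid, 1);   frozen at v1
 *   after:  metaGetTbTSchema(REPO_META(pRepo), pTbData->uid, -1);  latest version
 */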
pCommith->readh.pDCols[0]->cols[0].pData, - pCommith->readh.pDCols[0]->numOfRows, pCfg->update, &mInfo); + tsdbLoadDataFromCache(TSDB_COMMIT_REPO(pCommith), pIter->pTable, &titer, keyLimit, INT32_MAX, NULL, + pCommith->readh.pDCols[0]->cols[0].pData, pCommith->readh.pDCols[0]->numOfRows, pCfg->update, + &mInfo); if (mInfo.nOperations == 0) { // no new data to insert (all updates denied) @@ -1310,9 +1317,9 @@ static int tsdbMergeMemData(SCommitH *pCommith, SCommitIter *pIter, int bidx) { *(pIter->pIter) = titer; } else if (tsdbCanAddSubBlock(pCommith, pBlock, &mInfo)) { // Add a sub-block - tsdbLoadDataFromCache(pIter->pTable, pIter->pIter, keyLimit, INT32_MAX, pCommith->pDataCols, - pCommith->readh.pDCols[0]->cols[0].pData, pCommith->readh.pDCols[0]->numOfRows, pCfg->update, - &mInfo); + tsdbLoadDataFromCache(TSDB_COMMIT_REPO(pCommith), pIter->pTable, pIter->pIter, keyLimit, INT32_MAX, + pCommith->pDataCols, pCommith->readh.pDCols[0]->cols[0].pData, + pCommith->readh.pDCols[0]->numOfRows, pCfg->update, &mInfo); if (pBlock->last) { pDFile = TSDB_COMMIT_LAST_FILE(pCommith); } else { @@ -1417,8 +1424,8 @@ static int tsdbMergeBlockData(SCommitH *pCommith, SCommitIter *pIter, SDataCols int biter = 0; while (true) { - tsdbLoadAndMergeFromCache(pCommith->readh.pDCols[0], &biter, pIter, pCommith->pDataCols, keyLimit, defaultRows, - pCfg->update); + tsdbLoadAndMergeFromCache(TSDB_COMMIT_REPO(pCommith), pCommith->readh.pDCols[0], &biter, pIter, pCommith->pDataCols, + keyLimit, defaultRows, pCfg->update); if (pCommith->pDataCols->numOfRows == 0) break; @@ -1442,8 +1449,8 @@ static int tsdbMergeBlockData(SCommitH *pCommith, SCommitIter *pIter, SDataCols return 0; } -static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, SDataCols *pTarget, - TSKEY maxKey, int maxRows, int8_t update) { +static void tsdbLoadAndMergeFromCache(STsdb *pTsdb, SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, + SDataCols *pTarget, TSKEY maxKey, int maxRows, int8_t update) { TSKEY key1 = INT64_MAX; TSKEY key2 = INT64_MAX; TSKEY lastKey = TSKEY_INITIAL_VAL; @@ -1484,7 +1491,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt ++(*iter); } else if (key1 > key2) { if (pSchema == NULL || schemaVersion(pSchema) != TD_ROW_SVER(row)) { - pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, TD_ROW_SVER(row)); + pSchema = tsdbGetTableSchemaImpl(pTsdb, pCommitIter->pTable, false, false, TD_ROW_SVER(row)); ASSERT(pSchema != NULL); } @@ -1503,13 +1510,16 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt tSkipListIterNext(pCommitIter->pIter); } else { if (lastKey != key1) { + if (lastKey != TSKEY_INITIAL_VAL) { + ++pTarget->numOfRows; + } lastKey = key1; - ++pTarget->numOfRows; } // copy disk data for (int i = 0; i < pDataCols->numOfCols; ++i) { SCellVal sVal = {0}; + // no duplicated TS keys in pDataCols from file if (tdGetColDataOfRow(&sVal, pDataCols->cols + i, *iter, pDataCols->bitmapMode) < 0) { TASSERT(0); } @@ -1521,7 +1531,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt if (TD_SUPPORT_UPDATE(update)) { // copy mem data(Multi-Version) if (pSchema == NULL || schemaVersion(pSchema) != TD_ROW_SVER(row)) { - pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, TD_ROW_SVER(row)); + pSchema = tsdbGetTableSchemaImpl(pTsdb, pCommitIter->pTable, false, false, TD_ROW_SVER(row)); ASSERT(pSchema != NULL); } diff --git 
a/source/dnode/vnode/src/tsdb/tsdbMemTable.c b/source/dnode/vnode/src/tsdb/tsdbMemTable.c index d8426db12719f4bc27915c07b6ec9e5235b5e47c..9b9a431b5008c806adfbbf3172f61830129c3bdb 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMemTable.c +++ b/source/dnode/vnode/src/tsdb/tsdbMemTable.c @@ -20,7 +20,8 @@ static void tsdbFreeTbData(STbData *pTbData); static char *tsdbGetTsTupleKey(const void *data); static int tsdbTbDataComp(const void *arg1, const void *arg2); static char *tsdbTbDataGetUid(const void *arg); -static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, STSRow *row, bool merge); +static int tsdbAppendTableRowToCols(STsdb *pTsdb, STable *pTable, SDataCols *pCols, STSchema **ppSchema, STSRow *row, + bool merge); int tsdbMemTableCreate(STsdb *pTsdb, STsdbMemTable **ppMemTable) { STsdbMemTable *pMemTable; @@ -88,8 +89,8 @@ void tsdbMemTableDestroy(STsdb *pTsdb, STsdbMemTable *pMemTable) { * * The function tries to procceed AS MUCH AS POSSIBLE. */ -int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey, int maxRowsToRead, SDataCols *pCols, - TKEY *filterKeys, int nFilterKeys, bool keepDup, SMergeInfo *pMergeInfo) { +int tsdbLoadDataFromCache(STsdb *pTsdb, STable *pTable, SSkipListIterator *pIter, TSKEY maxKey, int maxRowsToRead, + SDataCols *pCols, TKEY *filterKeys, int nFilterKeys, bool keepDup, SMergeInfo *pMergeInfo) { ASSERT(maxRowsToRead > 0 && nFilterKeys >= 0); if (pIter == NULL) return 0; STSchema *pSchema = NULL; @@ -222,12 +223,12 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey if (lastKey != TSKEY_INITIAL_VAL) { ++pCols->numOfRows; } - tsdbAppendTableRowToCols(pTable, pCols, &pSchema, row, false); + tsdbAppendTableRowToCols(pTsdb, pTable, pCols, &pSchema, row, false); } lastKey = rowKey; } else { if (keepDup) { - tsdbAppendTableRowToCols(pTable, pCols, &pSchema, row, true); + tsdbAppendTableRowToCols(pTsdb, pTable, pCols, &pSchema, row, true); } else { // discard } @@ -249,7 +250,7 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey if (pCols && pMergeInfo->nOperations >= pCols->maxPoints) break; pMergeInfo->rowsDeleteSucceed++; pMergeInfo->nOperations++; - tsdbAppendTableRowToCols(pTable, pCols, &pSchema, row, false); + tsdbAppendTableRowToCols(pTsdb, pTable, pCols, &pSchema, row, false); } else { if (keepDup) { if (pCols && pMergeInfo->nOperations >= pCols->maxPoints) break; @@ -262,11 +263,11 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey if (lastKey != TSKEY_INITIAL_VAL) { ++pCols->numOfRows; } - tsdbAppendTableRowToCols(pTable, pCols, &pSchema, row, false); + tsdbAppendTableRowToCols(pTsdb, pTable, pCols, &pSchema, row, false); } lastKey = rowKey; } else { - tsdbAppendTableRowToCols(pTable, pCols, &pSchema, row, true); + tsdbAppendTableRowToCols(pTsdb, pTable, pCols, &pSchema, row, true); } } else { pMergeInfo->keyFirst = TMIN(pMergeInfo->keyFirst, fKey); @@ -320,13 +321,13 @@ int tsdbInsertTableData(STsdb *pTsdb, SSubmitMsgIter *pMsgIter, SSubmitBlk *pBlo terrno = TSDB_CODE_PAR_TABLE_NOT_EXIST; return -1; } - strcat(pRsp->tblFName, mr.me.name); - + if(pRsp->tblFName) strcat(pRsp->tblFName, mr.me.name); + if (mr.me.type == TSDB_NORMAL_TABLE) { - sverNew = mr.me.ntbEntry.schema.sver; + sverNew = mr.me.ntbEntry.schemaRow.version; } else { metaGetTableEntryByUid(&mr, mr.me.ctbEntry.suid); - sverNew = mr.me.stbEntry.schema.sver; + sverNew = mr.me.stbEntry.schemaRow.version; } metaReaderClear(&mr); @@ -431,10 
+432,12 @@ static char *tsdbTbDataGetUid(const void *arg) { STbData *pTbData = (STbData *)arg; return (char *)(&(pTbData->uid)); } -static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, STSRow *row, bool merge) { + +static int tsdbAppendTableRowToCols(STsdb *pTsdb, STable *pTable, SDataCols *pCols, STSchema **ppSchema, STSRow *row, + bool merge) { if (pCols) { if (*ppSchema == NULL || schemaVersion(*ppSchema) != TD_ROW_SVER(row)) { - *ppSchema = tsdbGetTableSchemaImpl(pTable, false, false, TD_ROW_SVER(row)); + *ppSchema = tsdbGetTableSchemaImpl(pTsdb, pTable, false, false, TD_ROW_SVER(row)); if (*ppSchema == NULL) { ASSERT(false); return -1; diff --git a/source/dnode/vnode/src/tsdb/tsdbMemTable2.c b/source/dnode/vnode/src/tsdb/tsdbMemTable2.c index 2acca738fbc3f2950f537f3b4492fd56b03ccee3..025b2ab580163cf3e9b9031b24f1b07881d3ec61 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMemTable2.c +++ b/source/dnode/vnode/src/tsdb/tsdbMemTable2.c @@ -38,7 +38,7 @@ struct SMemTable { struct SMemSkipListNode { int8_t level; - SMemSkipListNode *forwards[1]; // Windows does not allow 0 + SMemSkipListNode *forwards[1]; // Windows does not allow 0 }; struct SMemSkipList { @@ -46,7 +46,7 @@ struct SMemSkipList { int8_t maxLevel; int8_t level; int32_t size; - SMemSkipListNode pHead[1]; // Windows does not allow 0 + SMemSkipListNode pHead[1]; // Windows does not allow 0 }; struct SMemData { @@ -217,7 +217,7 @@ int32_t tsdbInsertData2(SMemTable *pMemTb, int64_t version, const SVSubmitBlk *p if (tDecodeIsEnd(&dc)) break; // decode row - if (tDecodeBinary(&dc, (const uint8_t **)&tRow.pRow, &tRow.szRow) < 0) { + if (tDecodeBinary(&dc, (uint8_t **)&tRow.pRow, &tRow.szRow) < 0) { terrno = TSDB_CODE_INVALID_MSG; return -1; } @@ -273,7 +273,7 @@ static FORCE_INLINE int32_t tsdbEncodeRow(SEncoder *pEncoder, const STsdbRow *pR static FORCE_INLINE int32_t tsdbDecodeRow(SDecoder *pDecoder, STsdbRow *pRow) { if (tDecodeI64(pDecoder, &pRow->version) < 0) return -1; - if (tDecodeBinary(pDecoder, (const uint8_t **)&pRow->pRow, &pRow->szRow) < 0) return -1; + if (tDecodeBinary(pDecoder, (uint8_t **)&pRow->pRow, &pRow->szRow) < 0) return -1; return 0; } diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 837ef9442952bd1874ed23db15ba541a9b96867f..62125b6dc7a5dadcbe534c4dab64b07e9964c3f2 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -13,6 +13,7 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ +#include "vnode.h" #include "tsdb.h" #define EXTRA_BYTES 2 @@ -140,16 +141,8 @@ typedef struct STsdbReadHandle { STSchema* pSchema; } STsdbReadHandle; -typedef struct STableGroupSupporter { - int32_t numOfCols; - SColIndex* pCols; - SSchema* pTagSchema; -} STableGroupSupporter; - -int32_t tsdbQueryTableList(void* pMeta, SArray* pRes, void* filterInfo); - -static STimeWindow updateLastrowForEachGroup(STableGroupInfo* groupList); -static int32_t checkForCachedLastRow(STsdbReadHandle* pTsdbReadHandle, STableGroupInfo* groupList); +static STimeWindow updateLastrowForEachGroup(STableListInfo* pList); +static int32_t checkForCachedLastRow(STsdbReadHandle* pTsdbReadHandle, STableListInfo* pList); static int32_t checkForCachedLast(STsdbReadHandle* pTsdbReadHandle); // static int32_t tsdbGetCachedLastRow(STable* pTable, STSRow** pRes, TSKEY* lastKey); @@ -213,12 +206,6 @@ int64_t tsdbGetNumOfRowsInMemTable(tsdbReaderT* pHandle) { return rows; } - // STableData* pMem = NULL; - // STableData* pIMem = NULL; - - // SMemTable* pMemT = pMemRef->snapshot.mem; - // SMemTable* pIMemT = pMemRef->snapshot.imem; - size_t size = taosArrayGetSize(pTsdbReadHandle->pTableCheckInfo); for (int32_t i = 0; i < size; ++i) { STableCheckInfo* pCheckInfo = taosArrayGet(pTsdbReadHandle->pTableCheckInfo, i); @@ -235,41 +222,34 @@ int64_t tsdbGetNumOfRowsInMemTable(tsdbReaderT* pHandle) { return rows; } -static SArray* createCheckInfoFromTableGroup(STsdbReadHandle* pTsdbReadHandle, STableGroupInfo* pGroupList) { - size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList); - assert(numOfGroup >= 1); +static SArray* createCheckInfoFromTableGroup(STsdbReadHandle* pTsdbReadHandle, STableListInfo* pTableList) { + size_t tableSize = taosArrayGetSize(pTableList->pTableList); + assert(tableSize >= 1); // allocate buffer in order to load data blocks from file - SArray* pTableCheckInfo = taosArrayInit(pGroupList->numOfTables, sizeof(STableCheckInfo)); + SArray* pTableCheckInfo = taosArrayInit(tableSize, sizeof(STableCheckInfo)); if (pTableCheckInfo == NULL) { return NULL; } // todo apply the lastkey of table check to avoid to load header file - for (int32_t i = 0; i < numOfGroup; ++i) { - SArray* group = *(SArray**)taosArrayGet(pGroupList->pGroupList, i); + for (int32_t j = 0; j < tableSize; ++j) { + STableKeyInfo* pKeyInfo = (STableKeyInfo*)taosArrayGet(pTableList->pTableList, j); - size_t gsize = taosArrayGetSize(group); - assert(gsize > 0); - - for (int32_t j = 0; j < gsize; ++j) { - STableKeyInfo* pKeyInfo = (STableKeyInfo*)taosArrayGet(group, j); - - STableCheckInfo info = {.lastKey = pKeyInfo->lastKey, .tableId = pKeyInfo->uid}; - if (ASCENDING_TRAVERSE(pTsdbReadHandle->order)) { - if (info.lastKey == INT64_MIN || info.lastKey < pTsdbReadHandle->window.skey) { - info.lastKey = pTsdbReadHandle->window.skey; - } - - assert(info.lastKey >= pTsdbReadHandle->window.skey && info.lastKey <= pTsdbReadHandle->window.ekey); - } else { + STableCheckInfo info = {.lastKey = pKeyInfo->lastKey, .tableId = pKeyInfo->uid}; + if (ASCENDING_TRAVERSE(pTsdbReadHandle->order)) { + if (info.lastKey == INT64_MIN || info.lastKey < pTsdbReadHandle->window.skey) { info.lastKey = pTsdbReadHandle->window.skey; } - taosArrayPush(pTableCheckInfo, &info); - tsdbDebug("%p check table uid:%" PRId64 " from lastKey:%" PRId64 " %s", pTsdbReadHandle, info.tableId, - info.lastKey, pTsdbReadHandle->idStr); + assert(info.lastKey >= pTsdbReadHandle->window.skey && info.lastKey <= pTsdbReadHandle->window.ekey); + } else { + info.lastKey = 
pTsdbReadHandle->window.skey; } + + taosArrayPush(pTableCheckInfo, &info); + tsdbDebug("%p check table uid:%" PRId64 " from lastKey:%" PRId64 " %s", pTsdbReadHandle, info.tableId, info.lastKey, + pTsdbReadHandle->idStr); } // TODO group table according to the tag value. @@ -326,28 +306,28 @@ static int64_t getEarliestValidTimestamp(STsdb* pTsdb) { return now - (tsTickPerMin[pCfg->precision] * pCfg->keep2) + 1; // needs to add one tick } -static void setQueryTimewindow(STsdbReadHandle* pTsdbReadHandle, SQueryTableDataCond* pCond) { - pTsdbReadHandle->window = pCond->twindow; +static void setQueryTimewindow(STsdbReadHandle* pTsdbReadHandle, SQueryTableDataCond* pCond, int32_t tWinIdx) { + pTsdbReadHandle->window = pCond->twindows[tWinIdx]; bool updateTs = false; int64_t startTs = getEarliestValidTimestamp(pTsdbReadHandle->pTsdb); if (ASCENDING_TRAVERSE(pTsdbReadHandle->order)) { if (startTs > pTsdbReadHandle->window.skey) { pTsdbReadHandle->window.skey = startTs; - pCond->twindow.skey = startTs; + pCond->twindows[tWinIdx].skey = startTs; updateTs = true; } } else { if (startTs > pTsdbReadHandle->window.ekey) { pTsdbReadHandle->window.ekey = startTs; - pCond->twindow.ekey = startTs; + pCond->twindows[tWinIdx].ekey = startTs; updateTs = true; } } if (updateTs) { tsdbDebug("%p update the query time window, old:%" PRId64 " - %" PRId64 ", new:%" PRId64 " - %" PRId64 ", %s", - pTsdbReadHandle, pCond->twindow.skey, pCond->twindow.ekey, pTsdbReadHandle->window.skey, + pTsdbReadHandle, pCond->twindows[tWinIdx].skey, pCond->twindows[tWinIdx].ekey, pTsdbReadHandle->window.skey, pTsdbReadHandle->window.ekey, pTsdbReadHandle->idStr); } } @@ -372,13 +352,16 @@ static STsdb* getTsdbByRetentions(SVnode* pVnode, STsdbReadHandle* pReadHandle, } if (level == TSDB_RETENTION_L0) { - tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, TSDB_RETENTION_L0); + tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, + TSDB_RETENTION_L0); return VND_RSMA0(pVnode); } else if (level == TSDB_RETENTION_L1) { - tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, TSDB_RETENTION_L1); + tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, + TSDB_RETENTION_L1); return VND_RSMA1(pVnode); } else { - tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, TSDB_RETENTION_L2); + tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, + TSDB_RETENTION_L2); return VND_RSMA2(pVnode); } } @@ -391,7 +374,7 @@ static STsdbReadHandle* tsdbQueryTablesImpl(SVnode* pVnode, SQueryTableDataCond* goto _end; } - STsdb* pTsdb = getTsdbByRetentions(pVnode, pReadHandle, pCond->twindow.skey, pVnode->config.tsdbCfg.retentions); + STsdb* pTsdb = getTsdbByRetentions(pVnode, pReadHandle, pCond->twindows[0].skey, pVnode->config.tsdbCfg.retentions); pReadHandle->order = pCond->order; pReadHandle->pTsdb = pTsdb; @@ -417,11 +400,11 @@ static STsdbReadHandle* tsdbQueryTablesImpl(SVnode* pVnode, SQueryTableDataCond* } assert(pCond != NULL); - setQueryTimewindow(pReadHandle, pCond); + setQueryTimewindow(pReadHandle, pCond, 0); if (pCond->numOfCols > 0) { int32_t rowLen = 0; - for(int32_t i = 0; i < pCond->numOfCols; ++i) { + for (int32_t i = 0; i < pCond->numOfCols; ++i) { rowLen += pCond->colList[i].bytes; } @@ -456,10 +439,10 @@ static STsdbReadHandle* tsdbQueryTablesImpl(SVnode* pVnode, 
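setQueryTimewindow now takes a tWinIdx because SQueryTableDataCond carries a twindows[] array rather than a single twindow. A hedged sketch of how a caller might drive one reader across several windows; the driver shape and numOfWindows count are assumptions, only the reset signature comes from this patch:

// Sketch only: tsdbNextDataBlock is the existing block iterator.
static void scanAllWindows(tsdbReaderT reader, SQueryTableDataCond *pCond, int32_t numOfWindows) {
  for (int32_t tWinIdx = 0; tWinIdx < numOfWindows; ++tWinIdx) {
    tsdbResetReadHandle(reader, pCond, tWinIdx);  // applies pCond->twindows[tWinIdx]
    while (tsdbNextDataBlock(reader)) {
      // consume the blocks that fall inside this window
    }
  }
}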
SQueryTableDataCond* } pReadHandle->suppInfo.defaultLoadColumn = getDefaultLoadColumns(pReadHandle, true); - pReadHandle->suppInfo.slotIds = - taosMemoryMalloc(sizeof(int32_t) * taosArrayGetSize(pReadHandle->suppInfo.defaultLoadColumn)); - pReadHandle->suppInfo.plist = - taosMemoryCalloc(taosArrayGetSize(pReadHandle->suppInfo.defaultLoadColumn), POINTER_BYTES); + + size_t size = taosArrayGetSize(pReadHandle->suppInfo.defaultLoadColumn); + pReadHandle->suppInfo.slotIds = taosMemoryCalloc(size, sizeof(int32_t)); + pReadHandle->suppInfo.plist = taosMemoryCalloc(size, POINTER_BYTES); } pReadHandle->pDataCols = tdNewDataCols(1000, pVnode->config.tsdbCfg.maxRows); @@ -480,7 +463,40 @@ _end: return NULL; } -tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList, uint64_t qId, +static int32_t setCurrentSchema(SVnode* pVnode, STsdbReadHandle* pTsdbReadHandle) { + STableCheckInfo* pCheckInfo = taosArrayGet(pTsdbReadHandle->pTableCheckInfo, 0); + + int32_t sversion = 1; + + SMetaReader mr = {0}; + metaReaderInit(&mr, pVnode->pMeta, 0); + int32_t code = metaGetTableEntryByUid(&mr, pCheckInfo->tableId); + if (code != TSDB_CODE_SUCCESS) { + terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; + metaReaderClear(&mr); + return terrno; + } + + if (mr.me.type == TSDB_CHILD_TABLE) { + tb_uid_t suid = mr.me.ctbEntry.suid; + code = metaGetTableEntryByUid(&mr, suid); + if (code != TSDB_CODE_SUCCESS) { + terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; + metaReaderClear(&mr); + return terrno; + } + sversion = mr.me.stbEntry.schemaRow.version; + } else { + ASSERT(mr.me.type == TSDB_NORMAL_TABLE); + sversion = mr.me.ntbEntry.schemaRow.version; + } + + metaReaderClear(&mr); + pTsdbReadHandle->pSchema = metaGetTbTSchema(pVnode->pMeta, pCheckInfo->tableId, sversion); + return TSDB_CODE_SUCCESS; +} + +tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableListInfo* tableList, uint64_t qId, uint64_t taskId) { STsdbReadHandle* pTsdbReadHandle = tsdbQueryTablesImpl(pVnode, pCond, qId, taskId); if (pTsdbReadHandle == NULL) { @@ -492,16 +508,19 @@ tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableG } // todo apply the lastkey of table check to avoid to load header file - pTsdbReadHandle->pTableCheckInfo = createCheckInfoFromTableGroup(pTsdbReadHandle, groupList); + pTsdbReadHandle->pTableCheckInfo = createCheckInfoFromTableGroup(pTsdbReadHandle, tableList); if (pTsdbReadHandle->pTableCheckInfo == NULL) { // tsdbCleanupReadHandle(pTsdbReadHandle); terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; return NULL; } - STableCheckInfo* pCheckInfo = taosArrayGet(pTsdbReadHandle->pTableCheckInfo, 0); + int32_t code = setCurrentSchema(pVnode, pTsdbReadHandle); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + return NULL; + } - pTsdbReadHandle->pSchema = metaGetTbTSchema(pVnode->pMeta, pCheckInfo->tableId, 1); int32_t numOfCols = taosArrayGetSize(pTsdbReadHandle->suppInfo.defaultLoadColumn); int16_t* ids = pTsdbReadHandle->suppInfo.defaultLoadColumn->pData; @@ -522,14 +541,14 @@ tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableG } } - tsdbDebug("%p total numOfTable:%" PRIzu " in this query, group %" PRIzu " %s", pTsdbReadHandle, - taosArrayGetSize(pTsdbReadHandle->pTableCheckInfo), taosArrayGetSize(groupList->pGroupList), + tsdbDebug("%p total numOfTable:%" PRIzu " in this query, table %" PRIzu " %s", pTsdbReadHandle, + taosArrayGetSize(pTsdbReadHandle->pTableCheckInfo), taosArrayGetSize(tableList->pTableList), 
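setCurrentSchema above replaces the previously hard-coded schema version 1: for a child table it hops via ctbEntry.suid to the super-table entry and reads schemaRow.version from there. The meta-reader pattern it relies on, reduced to a skeleton (helper name hypothetical, error handling trimmed):

static int32_t lookupSchemaVersion(SVnode *pVnode, tb_uid_t uid, int32_t *pSver) {
  SMetaReader mr = {0};
  metaReaderInit(&mr, pVnode->pMeta, 0);
  if (metaGetTableEntryByUid(&mr, uid) != TSDB_CODE_SUCCESS) {
    metaReaderClear(&mr);
    return -1;
  }
  if (mr.me.type == TSDB_CHILD_TABLE) {
    tb_uid_t suid = mr.me.ctbEntry.suid;  // hop to the super table
    if (metaGetTableEntryByUid(&mr, suid) != TSDB_CODE_SUCCESS) {
      metaReaderClear(&mr);
      return -1;
    }
    *pSver = mr.me.stbEntry.schemaRow.version;
  } else {
    *pSver = mr.me.ntbEntry.schemaRow.version;  // normal table
  }
  metaReaderClear(&mr);
  return 0;
}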
pTsdbReadHandle->idStr); return (tsdbReaderT)pTsdbReadHandle; } -void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond* pCond) { +void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond* pCond, int32_t tWinIdx) { STsdbReadHandle* pTsdbReadHandle = queryHandle; if (emptyQueryTimewindow(pTsdbReadHandle)) { @@ -542,7 +561,7 @@ void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond* pCond) { } pTsdbReadHandle->order = pCond->order; - pTsdbReadHandle->window = pCond->twindow; + setQueryTimewindow(pTsdbReadHandle, pCond, tWinIdx); pTsdbReadHandle->type = TSDB_QUERY_TYPE_ALL; pTsdbReadHandle->cur.fid = -1; pTsdbReadHandle->cur.win = TSWINDOW_INITIALIZER; @@ -567,11 +586,11 @@ void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond* pCond) { resetCheckInfo(pTsdbReadHandle); } -void tsdbResetQueryHandleForNewTable(tsdbReaderT queryHandle, SQueryTableDataCond* pCond, STableGroupInfo* groupList) { +void tsdbResetQueryHandleForNewTable(tsdbReaderT queryHandle, SQueryTableDataCond* pCond, STableListInfo* tableList, int32_t tWinIdx) { STsdbReadHandle* pTsdbReadHandle = queryHandle; pTsdbReadHandle->order = pCond->order; - pTsdbReadHandle->window = pCond->twindow; + pTsdbReadHandle->window = pCond->twindows[tWinIdx]; pTsdbReadHandle->type = TSDB_QUERY_TYPE_ALL; pTsdbReadHandle->cur.fid = -1; pTsdbReadHandle->cur.win = TSWINDOW_INITIALIZER; @@ -609,27 +628,27 @@ void tsdbResetQueryHandleForNewTable(tsdbReaderT queryHandle, SQueryTableDataCon // pTsdbReadHandle->next = doFreeColumnInfoData(pTsdbReadHandle->next); } -tsdbReaderT tsdbQueryLastRow(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList, uint64_t qId, +tsdbReaderT tsdbQueryLastRow(SVnode* pVnode, SQueryTableDataCond* pCond, STableListInfo* pList, uint64_t qId, uint64_t taskId) { - pCond->twindow = updateLastrowForEachGroup(groupList); + pCond->twindows[0] = updateLastrowForEachGroup(pList); // no qualified table - if (groupList->numOfTables == 0) { + if (taosArrayGetSize(pList->pTableList) == 0) { return NULL; } - STsdbReadHandle* pTsdbReadHandle = (STsdbReadHandle*)tsdbQueryTables(pVnode, pCond, groupList, qId, taskId); + STsdbReadHandle* pTsdbReadHandle = (STsdbReadHandle*)tsdbQueryTables(pVnode, pCond, pList, qId, taskId); if (pTsdbReadHandle == NULL) { return NULL; } - int32_t code = checkForCachedLastRow(pTsdbReadHandle, groupList); + int32_t code = checkForCachedLastRow(pTsdbReadHandle, pList); if (code != TSDB_CODE_SUCCESS) { // set the numOfTables to be 0 terrno = code; return NULL; } - assert(pCond->order == TSDB_ORDER_ASC && pCond->twindow.skey <= pCond->twindow.ekey); + assert(pCond->order == TSDB_ORDER_ASC && pCond->twindows[0].skey <= pCond->twindows[0].ekey); if (pTsdbReadHandle->cachelastrow) { pTsdbReadHandle->type = TSDB_QUERY_TYPE_LAST; } @@ -669,60 +688,60 @@ SArray* tsdbGetQueriedTableList(tsdbReaderT* pHandle) { } // leave only one table for each group -static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGroupList) { - assert(pGroupList); - size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList); - - STableGroupInfo* pNew = taosMemoryCalloc(1, sizeof(STableGroupInfo)); - pNew->pGroupList = taosArrayInit(numOfGroup, POINTER_BYTES); - - for (int32_t i = 0; i < numOfGroup; ++i) { - SArray* oneGroup = taosArrayGetP(pGroupList->pGroupList, i); - size_t numOfTables = taosArrayGetSize(oneGroup); - - SArray* px = taosArrayInit(4, sizeof(STableKeyInfo)); - for (int32_t j = 0; j < numOfTables; ++j) { - STableKeyInfo* pInfo = 
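tsdbResetReadHandle now funnels through setQueryTimewindow instead of copying the window directly, so the earliest-retained-timestamp clamp also runs on every reset. The clamp restated as a tiny helper; it mirrors getEarliestValidTimestamp earlier in this file and is illustrative only:

// now - keepTicks + 1 is the first timestamp still covered by retention;
// any window start before it is pulled forward.
static int64_t clampToRetention(int64_t skey, int64_t now, int64_t keepTicks) {
  int64_t earliest = now - keepTicks + 1;
  return (skey < earliest) ? earliest : skey;
}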
(STableKeyInfo*)taosArrayGet(oneGroup, j); - // if (window->skey <= pInfo->lastKey && ((STable*)pInfo->pTable)->lastKey != TSKEY_INITIAL_VAL) { - // taosArrayPush(px, pInfo); - // pNew->numOfTables += 1; - // break; - // } - } - - // there are no data in this group - if (taosArrayGetSize(px) == 0) { - taosArrayDestroy(px); - } else { - taosArrayPush(pNew->pGroupList, &px); - } - } - - return pNew; -} - -tsdbReaderT tsdbQueryRowsInExternalWindow(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList, - uint64_t qId, uint64_t taskId) { - STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList); - - if (pNew->numOfTables == 0) { - tsdbDebug("update query time range to invalidate time window"); - - assert(taosArrayGetSize(pNew->pGroupList) == 0); - bool asc = ASCENDING_TRAVERSE(pCond->order); - if (asc) { - pCond->twindow.ekey = pCond->twindow.skey - 1; - } else { - pCond->twindow.skey = pCond->twindow.ekey - 1; - } - } - - STsdbReadHandle* pTsdbReadHandle = (STsdbReadHandle*)tsdbQueryTables(pVnode, pCond, pNew, qId, taskId); - pTsdbReadHandle->loadExternalRow = true; - pTsdbReadHandle->currentLoadExternalRows = true; +// static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGroupList) { +// assert(pGroupList); +// size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList); +// +// STableGroupInfo* pNew = taosMemoryCalloc(1, sizeof(STableGroupInfo)); +// pNew->pGroupList = taosArrayInit(numOfGroup, POINTER_BYTES); +// +// for (int32_t i = 0; i < numOfGroup; ++i) { +// SArray* oneGroup = taosArrayGetP(pGroupList->pGroupList, i); +// size_t numOfTables = taosArrayGetSize(oneGroup); +// +// SArray* px = taosArrayInit(4, sizeof(STableKeyInfo)); +// for (int32_t j = 0; j < numOfTables; ++j) { +// STableKeyInfo* pInfo = (STableKeyInfo*)taosArrayGet(oneGroup, j); +// // if (window->skey <= pInfo->lastKey && ((STable*)pInfo->pTable)->lastKey != TSKEY_INITIAL_VAL) { +// // taosArrayPush(px, pInfo); +// // pNew->numOfTables += 1; +// // break; +// // } +// } +// +// // there are no data in this group +// if (taosArrayGetSize(px) == 0) { +// taosArrayDestroy(px); +// } else { +// taosArrayPush(pNew->pGroupList, &px); +// } +// } +// +// return pNew; +//} - return pTsdbReadHandle; -} +// tsdbReaderT tsdbQueryRowsInExternalWindow(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList, +// uint64_t qId, uint64_t taskId) { +// STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList); +// +// if (pNew->numOfTables == 0) { +// tsdbDebug("update query time range to invalidate time window"); +// +// assert(taosArrayGetSize(pNew->pGroupList) == 0); +// bool asc = ASCENDING_TRAVERSE(pCond->order); +// if (asc) { +// pCond->twindow.ekey = pCond->twindow.skey - 1; +// } else { +// pCond->twindow.skey = pCond->twindow.ekey - 1; +// } +// } +// +// STsdbReadHandle* pTsdbReadHandle = (STsdbReadHandle*)tsdbQueryTables(pVnode, pCond, pNew, qId, taskId); +// pTsdbReadHandle->loadExternalRow = true; +// pTsdbReadHandle->currentLoadExternalRows = true; +// +// return pTsdbReadHandle; +//} static bool initTableMemIterator(STsdbReadHandle* pHandle, STableCheckInfo* pCheckInfo) { if (pCheckInfo->initBuf) { @@ -1308,7 +1327,6 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock* if ((ascScan && (key != TSKEY_INITIAL_VAL && key <= binfo.window.ekey)) || (!ascScan && (key != TSKEY_INITIAL_VAL && key >= binfo.window.skey))) { - bool cacheDataInFileBlockHole = (ascScan && (key != TSKEY_INITIAL_VAL && key < binfo.window.skey)) 
|| (!ascScan && (key != TSKEY_INITIAL_VAL && key > binfo.window.ekey)); if (cacheDataInFileBlockHole) { @@ -1351,7 +1369,7 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock* pTsdbReadHandle->realNumOfRows = binfo.rows; cur->rows = binfo.rows; - cur->win = binfo.window; + cur->win = binfo.window; cur->mixBlock = false; cur->blockCompleted = true; @@ -1362,9 +1380,9 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock* cur->lastKey = binfo.window.skey - 1; cur->pos = -1; } - } else { // partially copy to dest buffer + } else { // partially copy to dest buffer // make sure to only load once - bool firstTimeExtract = ((cur->pos == 0 && ascScan) || (cur->pos == binfo.rows -1 && (!ascScan))); + bool firstTimeExtract = ((cur->pos == 0 && ascScan) || (cur->pos == binfo.rows - 1 && (!ascScan))); if (pTsdbReadHandle->outputCapacity < binfo.rows && firstTimeExtract) { code = doLoadFileDataBlock(pTsdbReadHandle, pBlock, pCheckInfo, cur->slot); if (code != TSDB_CODE_SUCCESS) { @@ -1377,7 +1395,7 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock* } if (pTsdbReadHandle->outputCapacity >= binfo.rows) { - ASSERT(cur->blockCompleted); + ASSERT(cur->blockCompleted || cur->mixBlock); } if (cur->rows == binfo.rows) { @@ -1638,9 +1656,7 @@ static int32_t mergeTwoRowFromMem(STsdbReadHandle* pTsdbReadHandle, int32_t capa int32_t numOfColsOfRow1 = 0; if (pSchema1 == NULL) { - // pSchema1 = metaGetTbTSchema(REPO_META(pTsdbReadHandle->pTsdb), uid, TD_ROW_SVER(row1)); - // TODO: use the real schemaVersion - pSchema1 = metaGetTbTSchema(REPO_META(pTsdbReadHandle->pTsdb), uid, 1); + pSchema1 = metaGetTbTSchema(REPO_META(pTsdbReadHandle->pTsdb), uid, TD_ROW_SVER(row1)); } #ifdef TD_DEBUG_PRINT_ROW @@ -1657,7 +1673,7 @@ static int32_t mergeTwoRowFromMem(STsdbReadHandle* pTsdbReadHandle, int32_t capa if (row2) { isRow2DataRow = TD_IS_TP_ROW(row2); if (pSchema2 == NULL) { - pSchema2 = metaGetTbTSchema(REPO_META(pTsdbReadHandle->pTsdb), uid, TD_ROW_SVER(row2)); + pSchema2 = metaGetTbTSchema(REPO_META(pTsdbReadHandle->pTsdb), uid, TD_ROW_SVER(row2)); } if (isRow2DataRow) { numOfColsOfRow2 = schemaNCols(pSchema2); @@ -1730,6 +1746,7 @@ static int32_t mergeTwoRowFromMem(STsdbReadHandle* pTsdbReadHandle, int32_t capa if (*lastRowKey != TSKEY_INITIAL_VAL) { ++(*curRow); } + *lastRowKey = rowKey; ++nResult; } else if (update) { mergeOption = 2; @@ -1737,8 +1754,6 @@ static int32_t mergeTwoRowFromMem(STsdbReadHandle* pTsdbReadHandle, int32_t capa mergeOption = 0; break; } - - *lastRowKey = rowKey; } } else { // TODO: use STSRowIter @@ -1751,6 +1766,7 @@ static int32_t mergeTwoRowFromMem(STsdbReadHandle* pTsdbReadHandle, int32_t capa if (*lastRowKey != TSKEY_INITIAL_VAL) { ++(*curRow); } + *lastRowKey = rowKey; ++nResult; } else if (update) { mergeOption = 2; @@ -1758,7 +1774,6 @@ static int32_t mergeTwoRowFromMem(STsdbReadHandle* pTsdbReadHandle, int32_t capa mergeOption = 0; break; } - *lastRowKey = rowKey; } else { SKvRowIdx* pColIdx = tdKvRowColIdxAt(row, chosen_itr - 1); colId = pColIdx->colId; @@ -1876,7 +1891,7 @@ static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STa bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order); - int32_t step = ascScan? 1 : -1; + int32_t step = ascScan ? 
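The mergeTwoRowFromMem hunks above move `*lastRowKey = rowKey;` inside the appended branch, so the key only advances when a row is actually materialized. The counting pattern being fixed, distilled in isolation (a sketch, not the verbatim branch logic; INT64_MIN stands in for TSKEY_INITIAL_VAL):

#include <stdint.h>

// Returns 1 when the row is appended. A duplicate key must not advance
// lastRowKey, otherwise the update/discard branches lose track of which
// key was last written out.
static int appendIfNewKey(int64_t rowKey, int64_t *lastRowKey, int32_t *curRow) {
  if (rowKey == *lastRowKey) return 0;        // duplicate: merge or discard
  if (*lastRowKey != INT64_MIN) ++(*curRow);  // first row stays at curRow == 0
  *lastRowKey = rowKey;                       // advance only on a real append
  return 1;
}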
1 : -1; int32_t start = cur->pos; int32_t end = endPos; @@ -1891,8 +1906,8 @@ static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STa // the time window should always be ascending order: skey <= ekey cur->win = (STimeWindow){.skey = tsArray[start], .ekey = tsArray[end]}; cur->mixBlock = (numOfRows != pBlockInfo->rows); - cur->lastKey = tsArray[endPos] + step; - cur->blockCompleted = (ascScan? (endPos == pBlockInfo->rows - 1):(endPos == 0)); + cur->lastKey = tsArray[endPos] + step; + cur->blockCompleted = (ascScan ? (endPos == pBlockInfo->rows - 1) : (endPos == 0)); // The value of pos may be -1 or pBlockInfo->rows, and it is invalid in both cases. int32_t pos = endPos + step; @@ -1908,7 +1923,7 @@ int32_t getEndPosInDataBlock(STsdbReadHandle* pTsdbReadHandle, SDataBlockInfo* p // NOTE: reverse the order to find the end position in data block int32_t endPos = -1; bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order); - int32_t order = ascScan? TSDB_ORDER_DESC : TSDB_ORDER_ASC; + int32_t order = ascScan ? TSDB_ORDER_DESC : TSDB_ORDER_ASC; SQueryFilePos* cur = &pTsdbReadHandle->cur; SDataCols* pCols = pTsdbReadHandle->rhelper.pDCols[0]; @@ -1963,12 +1978,12 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf SDataCols* pCols = pTsdbReadHandle->rhelper.pDCols[0]; assert(pCols->cols[0].type == TSDB_DATA_TYPE_TIMESTAMP && pCols->cols[0].colId == PRIMARYKEY_TIMESTAMP_COL_ID && cur->pos >= 0 && cur->pos < pBlock->numOfRows); - + // Even Multi-Version supported, the records with duplicated TSKEY would be merged inside of tsdbLoadData interface. TSKEY* tsArray = pCols->cols[0].pData; assert(pCols->numOfRows == pBlock->numOfRows && tsArray[0] == pBlock->keyFirst && tsArray[pBlock->numOfRows - 1] == pBlock->keyLast); - bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order); + bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order); int32_t step = ascScan ? 
1 : -1; // for search the endPos, so the order needs to reverse @@ -1979,8 +1994,9 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf STimeWindow* pWin = &blockInfo.window; tsdbDebug("%p uid:%" PRIu64 " start merge data block, file block range:%" PRIu64 "-%" PRIu64 - " rows:%d, start:%d, end:%d, %s", pTsdbReadHandle, pCheckInfo->tableId, pWin->skey, pWin->ekey, blockInfo.rows, - cur->pos, endPos, pTsdbReadHandle->idStr); + " rows:%d, start:%d, end:%d, %s", + pTsdbReadHandle, pCheckInfo->tableId, pWin->skey, pWin->ekey, blockInfo.rows, cur->pos, endPos, + pTsdbReadHandle->idStr); // compared with the data from in-memory buffer, to generate the correct timestamp array list int32_t numOfRows = 0; @@ -1993,6 +2009,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf int32_t pos = cur->pos; cur->win = TSWINDOW_INITIALIZER; + bool adjustPos = false; // no data in buffer, load data from file directly if (pCheckInfo->iiter == NULL && pCheckInfo->iter == NULL) { @@ -2014,6 +2031,13 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf break; } + if (adjustPos) { + if (key == lastKeyAppend) { + pos -= step; + } + adjustPos = false; + } + if (((pos > endPos || tsArray[pos] > pTsdbReadHandle->window.ekey) && ascScan) || ((pos < endPos || tsArray[pos] < pTsdbReadHandle->window.ekey) && !ascScan)) { break; @@ -2091,8 +2115,9 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf } // still assign data into current row - numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols, - pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend); + numOfRows += + mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols, + pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend); if (cur->win.skey == TSKEY_INITIAL_VAL) { cur->win.skey = key; @@ -2105,7 +2130,9 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf moveToNextRowInMem(pCheckInfo); pos += step; + adjustPos = true; } else { + // discard the memory record moveToNextRowInMem(pCheckInfo); } } else if ((key > tsArray[pos] && ascScan) || (key < tsArray[pos] && !ascScan)) { @@ -2155,8 +2182,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf * if cache is empty, load remain file block data. In contrast, if there are remain data in cache, do NOT * copy them all to result buffer, since it may be overlapped with file data block. 
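 * Worked example (illustrative keys): the file block holds {100, 150, 200}
 * and the in-memory cache holds {150, 250}. Copying the cached rows
 * verbatim would emit 150 twice and 250 ahead of 200; the merge path
 * below interleaves the two sources and resolves the duplicate according
 * to the update option.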
*/ - if (node == NULL || - ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) > pTsdbReadHandle->window.ekey) && ascScan) || + if (node == NULL || ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) > pTsdbReadHandle->window.ekey) && ascScan) || ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) < pTsdbReadHandle->window.ekey) && !ascScan)) { // no data in cache or data in cache is greater than the ekey of time window, load data from file block if (cur->win.skey == TSKEY_INITIAL_VAL) { @@ -2177,7 +2203,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf } cur->blockCompleted = (((pos > endPos || cur->lastKey > pTsdbReadHandle->window.ekey) && ascScan) || - ((pos < endPos || cur->lastKey < pTsdbReadHandle->window.ekey) && !ascScan)); + ((pos < endPos || cur->lastKey < pTsdbReadHandle->window.ekey) && !ascScan)); if (!ascScan) { TSWAP(cur->win.skey, cur->win.ekey); @@ -2796,7 +2822,13 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int return numOfRows; } -static int32_t getAllTableList(SMeta* pMeta, uint64_t uid, SArray* list) { +void* tsdbGetIdx(SMeta* pMeta) { + if (pMeta == NULL) { + return NULL; + } + return metaGetIdx(pMeta); +} +int32_t tsdbGetAllTableList(SMeta* pMeta, uint64_t uid, SArray* list) { SMCtbCursor* pCur = metaOpenCtbCursor(pMeta, uid); while (1) { @@ -3333,8 +3365,8 @@ bool isTsdbCacheLastRow(tsdbReaderT* pReader) { return ((STsdbReadHandle*)pReader)->cachelastrow > TSDB_CACHED_TYPE_NONE; } -int32_t checkForCachedLastRow(STsdbReadHandle* pTsdbReadHandle, STableGroupInfo* groupList) { - assert(pTsdbReadHandle != NULL && groupList != NULL); +int32_t checkForCachedLastRow(STsdbReadHandle* pTsdbReadHandle, STableListInfo* tableList) { + assert(pTsdbReadHandle != NULL && tableList != NULL); // TSKEY key = TSKEY_INITIAL_VAL; // @@ -3381,68 +3413,68 @@ int32_t checkForCachedLast(STsdbReadHandle* pTsdbReadHandle) { return code; } -STimeWindow updateLastrowForEachGroup(STableGroupInfo* groupList) { +STimeWindow updateLastrowForEachGroup(STableListInfo* pList) { STimeWindow window = {INT64_MAX, INT64_MIN}; - int32_t totalNumOfTable = 0; - SArray* emptyGroup = taosArrayInit(16, sizeof(int32_t)); - - // NOTE: starts from the buffer in case of descending timestamp order check data blocks - size_t numOfGroups = taosArrayGetSize(groupList->pGroupList); - for (int32_t j = 0; j < numOfGroups; ++j) { - SArray* pGroup = taosArrayGetP(groupList->pGroupList, j); - TSKEY key = TSKEY_INITIAL_VAL; - - STableKeyInfo keyInfo = {0}; - - size_t numOfTables = taosArrayGetSize(pGroup); - for (int32_t i = 0; i < numOfTables; ++i) { - STableKeyInfo* pInfo = (STableKeyInfo*)taosArrayGet(pGroup, i); - - // if the lastKey equals to INT64_MIN, there is no data in this table - TSKEY lastKey = 0; //((STable*)(pInfo->pTable))->lastKey; - if (key < lastKey) { - key = lastKey; - - // keyInfo.pTable = pInfo->pTable; - keyInfo.lastKey = key; - pInfo->lastKey = key; - - if (key < window.skey) { - window.skey = key; - } - - if (key > window.ekey) { - window.ekey = key; - } - } - } - - // more than one table in each group, only one table left for each group - // if (keyInfo.pTable != NULL) { - // totalNumOfTable++; - // if (taosArrayGetSize(pGroup) == 1) { - // // do nothing - // } else { - // taosArrayClear(pGroup); - // taosArrayPush(pGroup, &keyInfo); - // } - // } else { // mark all the empty groups, and remove it later - // taosArrayDestroy(pGroup); - // taosArrayPush(emptyGroup, &j); - // } - } - - // window does not being updated, so set the original - if 
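tsdbGetIdx and tsdbGetAllTableList above promote the former static getAllTableList into exported helpers. A hedged caller-side sketch for enumerating the child tables of a super table; the 0-on-success return convention is assumed from the surrounding code style:

// Sketch: collect every child table of `suid` into STableKeyInfo entries.
static SArray *listChildTables(SMeta *pMeta, uint64_t suid) {
  SArray *pList = taosArrayInit(8, sizeof(STableKeyInfo));
  if (pList == NULL) return NULL;
  if (tsdbGetAllTableList(pMeta, suid, pList) != 0) {
    taosArrayDestroy(pList);
    return NULL;
  }
  return pList;
}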
(window.skey == INT64_MAX && window.ekey == INT64_MIN) { - window = TSWINDOW_INITIALIZER; - assert(totalNumOfTable == 0 && taosArrayGetSize(groupList->pGroupList) == numOfGroups); - } - - taosArrayRemoveBatch(groupList->pGroupList, TARRAY_GET_START(emptyGroup), (int32_t)taosArrayGetSize(emptyGroup)); - taosArrayDestroy(emptyGroup); - - groupList->numOfTables = totalNumOfTable; + // int32_t totalNumOfTable = 0; + // SArray* emptyGroup = taosArrayInit(16, sizeof(int32_t)); + // + // // NOTE: starts from the buffer in case of descending timestamp order check data blocks + // size_t numOfGroups = taosArrayGetSize(groupList->pGroupList); + // for (int32_t j = 0; j < numOfGroups; ++j) { + // SArray* pGroup = taosArrayGetP(groupList->pGroupList, j); + // TSKEY key = TSKEY_INITIAL_VAL; + // + // STableKeyInfo keyInfo = {0}; + // + // size_t numOfTables = taosArrayGetSize(pGroup); + // for (int32_t i = 0; i < numOfTables; ++i) { + // STableKeyInfo* pInfo = (STableKeyInfo*)taosArrayGet(pGroup, i); + // + // // if the lastKey equals to INT64_MIN, there is no data in this table + // TSKEY lastKey = 0; //((STable*)(pInfo->pTable))->lastKey; + // if (key < lastKey) { + // key = lastKey; + // + // // keyInfo.pTable = pInfo->pTable; + // keyInfo.lastKey = key; + // pInfo->lastKey = key; + // + // if (key < window.skey) { + // window.skey = key; + // } + // + // if (key > window.ekey) { + // window.ekey = key; + // } + // } + // } + // + // // more than one table in each group, only one table left for each group + // // if (keyInfo.pTable != NULL) { + // // totalNumOfTable++; + // // if (taosArrayGetSize(pGroup) == 1) { + // // // do nothing + // // } else { + // // taosArrayClear(pGroup); + // // taosArrayPush(pGroup, &keyInfo); + // // } + // // } else { // mark all the empty groups, and remove it later + // // taosArrayDestroy(pGroup); + // // taosArrayPush(emptyGroup, &j); + // // } + // } + // + // // window does not being updated, so set the original + // if (window.skey == INT64_MAX && window.ekey == INT64_MIN) { + // window = TSWINDOW_INITIALIZER; + // assert(totalNumOfTable == 0 && taosArrayGetSize(groupList->pGroupList) == numOfGroups); + // } + // + // taosArrayRemoveBatch(groupList->pGroupList, TARRAY_GET_START(emptyGroup), (int32_t)taosArrayGetSize(emptyGroup)); + // taosArrayDestroy(emptyGroup); + // + // groupList->numOfTables = totalNumOfTable; return window; } @@ -3473,7 +3505,6 @@ void tsdbRetrieveDataBlockInfo(tsdbReaderT* pTsdbReadHandle, SDataBlockInfo* pDa pDataBlockInfo->rows = cur->rows; pDataBlockInfo->window = cur->win; - // ASSERT(pDataBlockInfo->numOfCols >= (int32_t)(QH_GET_NUM_OF_COLS(pHandle)); } /* @@ -3539,9 +3570,9 @@ int32_t tsdbRetrieveDataBlockStatisInfo(tsdbReaderT* pTsdbReadHandle, SColumnDat if (IS_BSMA_ON(&(pHandle->pSchema->columns[slotIds[i]]))) { if (pHandle->suppInfo.pstatis[i].numOfNull == -1) { // set the column data are all NULL pHandle->suppInfo.pstatis[i].numOfNull = pBlockInfo->compBlock->numOfRows; - } else { - pHandle->suppInfo.plist[i] = &pHandle->suppInfo.pstatis[i]; } + + pHandle->suppInfo.plist[i] = &pHandle->suppInfo.pstatis[i]; } else { *allHave = false; } @@ -3590,108 +3621,6 @@ SArray* tsdbRetrieveDataBlock(tsdbReaderT* pTsdbReadHandle, SArray* pIdList) { } } } -#if 0 -void filterPrepare(void* expr, void* param) { - tExprNode* pExpr = (tExprNode*)expr; - if (pExpr->_node.info != NULL) { - return; - } - - pExpr->_node.info = taosMemoryCalloc(1, sizeof(tQueryInfo)); - - STSchema* pTSSchema = (STSchema*) param; - tQueryInfo* pInfo = 
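The tsdbRetrieveDataBlockStatisInfo hunk above is a behavior fix, not a reformat: previously plist[i] was populated only in the else branch, so a column that was entirely NULL never exposed its statistics entry. The corrected shape in isolation (types reduced for the sketch):

typedef struct {
  int32_t numOfNull;
  // min/max/sum elided for the sketch
} SColumnDataAgg;

// numOfNull == -1 is the sentinel for "column entirely NULL"; the entry
// is now linked into plist either way.
static void exposeStatis(SColumnDataAgg *pstatis, SColumnDataAgg **plist, int32_t i, int32_t numOfRows) {
  if (pstatis[i].numOfNull == -1) {
    pstatis[i].numOfNull = numOfRows;
  }
  plist[i] = &pstatis[i];
}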
pExpr->_node.info; - tVariant* pCond = pExpr->_node.pRight->pVal; - SSchema* pSchema = pExpr->_node.pLeft->pSchema; - - pInfo->sch = *pSchema; - pInfo->optr = pExpr->_node.optr; - pInfo->compare = getComparFunc(pInfo->sch.type, pInfo->optr); - pInfo->indexed = pTSSchema->columns->colId == pInfo->sch.colId; - - if (pInfo->optr == TSDB_RELATION_IN) { - int dummy = -1; - SHashObj *pObj = NULL; - if (pInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) { - pObj = taosHashInit(256, taosGetDefaultHashFunction(pInfo->sch.type), true, false); - SArray *arr = (SArray *)(pCond->arr); - for (size_t i = 0; i < taosArrayGetSize(arr); i++) { - char* p = taosArrayGetP(arr, i); - strntolower_s(varDataVal(p), varDataVal(p), varDataLen(p)); - taosHashPut(pObj, varDataVal(p), varDataLen(p), &dummy, sizeof(dummy)); - } - } else { - buildFilterSetFromBinary((void **)&pObj, pCond->pz, pCond->nLen); - } - pInfo->q = (char *)pObj; - } else if (pCond != NULL) { - uint32_t size = pCond->nLen * TSDB_NCHAR_SIZE; - if (size < (uint32_t)pSchema->bytes) { - size = pSchema->bytes; - } - // to make sure tonchar does not cause invalid write, since the '\0' needs at least sizeof(TdUcs4) space. - pInfo->q = taosMemoryCalloc(1, size + TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); - tVariantDump(pCond, pInfo->q, pSchema->type, true); - } -} - -#endif - -static int32_t tableGroupComparFn(const void* p1, const void* p2, const void* param) { -#if 0 - STableGroupSupporter* pTableGroupSupp = (STableGroupSupporter*) param; - STable* pTable1 = ((STableKeyInfo*) p1)->uid; - STable* pTable2 = ((STableKeyInfo*) p2)->uid; - - for (int32_t i = 0; i < pTableGroupSupp->numOfCols; ++i) { - SColIndex* pColIndex = &pTableGroupSupp->pCols[i]; - int32_t colIndex = pColIndex->colIndex; - - assert(colIndex >= TSDB_TBNAME_COLUMN_INDEX); - - char * f1 = NULL; - char * f2 = NULL; - int32_t type = 0; - int32_t bytes = 0; - - if (colIndex == TSDB_TBNAME_COLUMN_INDEX) { - f1 = (char*) TABLE_NAME(pTable1); - f2 = (char*) TABLE_NAME(pTable2); - type = TSDB_DATA_TYPE_BINARY; - bytes = tGetTbnameColumnSchema()->bytes; - } else { - if (pTableGroupSupp->pTagSchema && colIndex < pTableGroupSupp->pTagSchema->numOfCols) { - STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex); - bytes = pCol->bytes; - type = pCol->type; - f1 = tdGetKVRowValOfCol(pTable1->tagVal, pCol->colId); - f2 = tdGetKVRowValOfCol(pTable2->tagVal, pCol->colId); - } - } - - // this tags value may be NULL - if (f1 == NULL && f2 == NULL) { - continue; - } - - if (f1 == NULL) { - return -1; - } - - if (f2 == NULL) { - return 1; - } - - int32_t ret = doCompare(f1, f2, type, bytes); - if (ret == 0) { - continue; - } else { - return ret; - } - } -#endif - return 0; -} static int tsdbCheckInfoCompar(const void* key1, const void* key2) { if (((STableCheckInfo*)key1)->tableId < ((STableCheckInfo*)key2)->tableId) { @@ -3704,320 +3633,6 @@ static int tsdbCheckInfoCompar(const void* key1, const void* key2) { } } -void createTableGroupImpl(SArray* pGroups, SArray* pTableList, size_t numOfTables, TSKEY skey, - STableGroupSupporter* pSupp, __ext_compar_fn_t compareFn) { - STable* pTable = taosArrayGetP(pTableList, 0); - SArray* g = taosArrayInit(16, sizeof(STableKeyInfo)); - - STableKeyInfo info = {.lastKey = skey}; - taosArrayPush(g, &info); - - for (int32_t i = 1; i < numOfTables; ++i) { - STable** prev = taosArrayGet(pTableList, i - 1); - STable** p = taosArrayGet(pTableList, i); - - int32_t ret = compareFn(prev, p, pSupp); - assert(ret == 0 || ret == -1); - - if (ret == 0) { - STableKeyInfo info1 = 
{.lastKey = skey}; - taosArrayPush(g, &info1); - } else { - taosArrayPush(pGroups, &g); // current group is ended, start a new group - g = taosArrayInit(16, sizeof(STableKeyInfo)); - - STableKeyInfo info1 = {.lastKey = skey}; - taosArrayPush(g, &info1); - } - } - - taosArrayPush(pGroups, &g); -} - -SArray* createTableGroup(SArray* pTableList, SSchemaWrapper* pTagSchema, SColIndex* pCols, int32_t numOfOrderCols, - TSKEY skey) { - assert(pTableList != NULL); - SArray* pTableGroup = taosArrayInit(1, POINTER_BYTES); - - size_t size = taosArrayGetSize(pTableList); - if (size == 0) { - tsdbDebug("no qualified tables"); - return pTableGroup; - } - - if (numOfOrderCols == 0 || size == 1) { // no group by tags clause or only one table - SArray* sa = taosArrayDup(pTableList); - if (sa == NULL) { - taosArrayDestroy(pTableGroup); - return NULL; - } - - taosArrayPush(pTableGroup, &sa); - tsdbDebug("all %" PRIzu " tables belong to one group", size); - } else { - STableGroupSupporter sup = {0}; - sup.numOfCols = numOfOrderCols; - sup.pTagSchema = pTagSchema->pSchema; - sup.pCols = pCols; - - taosqsort(pTableList->pData, size, sizeof(STableKeyInfo), &sup, tableGroupComparFn); - createTableGroupImpl(pTableGroup, pTableList, size, skey, &sup, tableGroupComparFn); - } - - return pTableGroup; -} - -// static bool tableFilterFp(const void* pNode, void* param) { -// tQueryInfo* pInfo = (tQueryInfo*) param; -// -// STable* pTable = (STable*)(SL_GET_NODE_DATA((SSkipListNode*)pNode)); -// -// char* val = NULL; -// if (pInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) { -// val = (char*) TABLE_NAME(pTable); -// } else { -// val = tdGetKVRowValOfCol(pTable->tagVal, pInfo->sch.colId); -// } -// -// if (pInfo->optr == TSDB_RELATION_ISNULL || pInfo->optr == TSDB_RELATION_NOTNULL) { -// if (pInfo->optr == TSDB_RELATION_ISNULL) { -// return (val == NULL) || isNull(val, pInfo->sch.type); -// } else if (pInfo->optr == TSDB_RELATION_NOTNULL) { -// return (val != NULL) && (!isNull(val, pInfo->sch.type)); -// } -// } else if (pInfo->optr == TSDB_RELATION_IN) { -// int type = pInfo->sch.type; -// if (type == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_TIMESTAMP) { -// int64_t v; -// GET_TYPED_DATA(v, int64_t, pInfo->sch.type, val); -// return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v)); -// } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { -// uint64_t v; -// GET_TYPED_DATA(v, uint64_t, pInfo->sch.type, val); -// return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v)); -// } -// else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) { -// double v; -// GET_TYPED_DATA(v, double, pInfo->sch.type, val); -// return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v)); -// } else if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR){ -// return NULL != taosHashGet((SHashObj *)pInfo->q, varDataVal(val), varDataLen(val)); -// } -// -// } -// -// int32_t ret = 0; -// if (val == NULL) { //the val is possible to be null, so check it out carefully -// ret = -1; // val is missing in table tags value pairs -// } else { -// ret = pInfo->compare(val, pInfo->q); -// } -// -// switch (pInfo->optr) { -// case TSDB_RELATION_EQUAL: { -// return ret == 0; -// } -// case TSDB_RELATION_NOT_EQUAL: { -// return ret != 0; -// } -// case TSDB_RELATION_GREATER_EQUAL: { -// return ret >= 0; -// } -// case TSDB_RELATION_GREATER: { -// return ret > 0; -// } -// case TSDB_RELATION_LESS_EQUAL: { -// return ret <= 0; -// } -// case TSDB_RELATION_LESS: { -// 
return ret < 0; -// } -// case TSDB_RELATION_LIKE: { -// return ret == 0; -// } -// case TSDB_RELATION_MATCH: { -// return ret == 0; -// } -// case TSDB_RELATION_NMATCH: { -// return ret == 0; -// } -// case TSDB_RELATION_IN: { -// return ret == 1; -// } -// -// default: -// assert(false); -// } -// -// return true; -//} - -// static void getTableListfromSkipList(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, SExprTraverseSupp -// *param); - -// static int32_t doQueryTableList(STable* pSTable, SArray* pRes, tExprNode* pExpr) { -// // // query according to the expression tree -// SExprTraverseSupp supp = { -// .nodeFilterFn = (__result_filter_fn_t)tableFilterFp, -// .setupInfoFn = filterPrepare, -// .pExtInfo = pSTable->tagSchema, -// }; -// -// getTableListfromSkipList(pExpr, pSTable->pIndex, pRes, &supp); -// tExprTreeDestroy(pExpr, destroyHelper); -// return TSDB_CODE_SUCCESS; -//} - -int32_t tsdbQuerySTableByTagCond(void* pMeta, uint64_t uid, TSKEY skey, const char* pTagCond, size_t len, - int16_t tagNameRelType, const char* tbnameCond, STableGroupInfo* pGroupInfo, - SColIndex* pColIndex, int32_t numOfCols, uint64_t reqId, uint64_t taskId) { - SMetaReader mr = {0}; - - metaReaderInit(&mr, (SMeta*)pMeta, 0); - - if (metaGetTableEntryByUid(&mr, uid) < 0) { - tsdbError("%p failed to get stable, uid:%" PRIu64 ", TID:0x%" PRIx64 " QID:0x%" PRIx64, pMeta, uid, taskId, reqId); - metaReaderClear(&mr); - terrno = TSDB_CODE_PAR_TABLE_NOT_EXIST; - goto _error; - } else { - tsdbDebug("%p succeed to get stable, uid:%" PRIu64 ", TID:0x%" PRIx64 " QID:0x%" PRIx64, pMeta, uid, taskId, reqId); - } - - if (mr.me.type != TSDB_SUPER_TABLE) { - tsdbError("%p query normal tag not allowed, uid:%" PRIu64 ", TID:0x%" PRIx64 " QID:0x%" PRIx64, pMeta, uid, taskId, - reqId); - terrno = TSDB_CODE_OPS_NOT_SUPPORT; // basically, this error is caused by invalid sql issued by client - metaReaderClear(&mr); - goto _error; - } - - metaReaderClear(&mr); - - // NOTE: not add ref count for super table - SArray* res = taosArrayInit(8, sizeof(STableKeyInfo)); - SSchemaWrapper* pTagSchema = metaGetTableSchema(pMeta, uid, 1, true); - - // no tags and tbname condition, all child tables of this stable are involved - if (tbnameCond == NULL && (pTagCond == NULL || len == 0)) { - int32_t ret = getAllTableList(pMeta, uid, res); - if (ret != TSDB_CODE_SUCCESS) { - goto _error; - } - - pGroupInfo->numOfTables = (uint32_t)taosArrayGetSize(res); - pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, skey); - - tsdbDebug("%p no table name/tag condition, all tables qualified, numOfTables:%u, group:%zu, TID:0x%" PRIx64 - " QID:0x%" PRIx64, - pMeta, pGroupInfo->numOfTables, taosArrayGetSize(pGroupInfo->pGroupList), taskId, reqId); - - taosArrayDestroy(res); - return ret; - } - - int32_t ret = TSDB_CODE_SUCCESS; - - SFilterInfo* filterInfo = NULL; - ret = filterInitFromNode((SNode*)pTagCond, &filterInfo, 0); - if (ret != TSDB_CODE_SUCCESS) { - terrno = ret; - return ret; - } - ret = tsdbQueryTableList(pMeta, res, filterInfo); - pGroupInfo->numOfTables = (uint32_t)taosArrayGetSize(res); - pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, skey); - - // tsdbDebug("%p stable tid:%d, uid:%" PRIu64 " query, numOfTables:%u, belong to %" PRIzu " groups", tsdb, - // pTable->tableId, pTable->uid, pGroupInfo->numOfTables, taosArrayGetSize(pGroupInfo->pGroupList)); - - taosArrayDestroy(res); - return ret; - -_error: - return terrno; -} - -int32_t tsdbQueryTableList(void* pMeta, SArray* 
pRes, void* filterInfo) { - // impl later - - return TSDB_CODE_SUCCESS; -} -int32_t tsdbGetOneTableGroup(void* pMeta, uint64_t uid, TSKEY startKey, STableGroupInfo* pGroupInfo) { - SMetaReader mr = {0}; - - metaReaderInit(&mr, (SMeta*)pMeta, 0); - - if (metaGetTableEntryByUid(&mr, uid) < 0) { - terrno = TSDB_CODE_PAR_TABLE_NOT_EXIST; - goto _error; - } - - metaReaderClear(&mr); - - pGroupInfo->numOfTables = 1; - pGroupInfo->pGroupList = taosArrayInit(1, POINTER_BYTES); - - SArray* group = taosArrayInit(1, sizeof(STableKeyInfo)); - - STableKeyInfo info = {.lastKey = startKey, .uid = uid}; - taosArrayPush(group, &info); - - taosArrayPush(pGroupInfo->pGroupList, &group); - return TSDB_CODE_SUCCESS; - -_error: - metaReaderClear(&mr); - return terrno; -} - -#if 0 -int32_t tsdbGetTableGroupFromIdListT(STsdb* tsdb, SArray* pTableIdList, STableGroupInfo* pGroupInfo) { - if (tsdbRLockRepoMeta(tsdb) < 0) { - return terrno; - } - - assert(pTableIdList != NULL); - size_t size = taosArrayGetSize(pTableIdList); - pGroupInfo->pGroupList = taosArrayInit(1, POINTER_BYTES); - SArray* group = taosArrayInit(1, sizeof(STableKeyInfo)); - - for(int32_t i = 0; i < size; ++i) { - STableIdInfo *id = taosArrayGet(pTableIdList, i); - - STable* pTable = tsdbGetTableByUid(tsdbGetMeta(tsdb), id->uid); - if (pTable == NULL) { - tsdbWarn("table uid:%"PRIu64", tid:%d has been drop already", id->uid, id->tid); - continue; - } - - if (pTable->type == TSDB_SUPER_TABLE) { - tsdbError("direct query on super tale is not allowed, table uid:%"PRIu64", tid:%d", id->uid, id->tid); - terrno = TSDB_CODE_QRY_INVALID_MSG; - tsdbUnlockRepoMeta(tsdb); - taosArrayDestroy(group); - return terrno; - } - - STableKeyInfo info = {.pTable = pTable, .lastKey = id->key}; - taosArrayPush(group, &info); - } - - if (tsdbUnlockRepoMeta(tsdb) < 0) { - taosArrayDestroy(group); - return terrno; - } - - pGroupInfo->numOfTables = (uint32_t) taosArrayGetSize(group); - if (pGroupInfo->numOfTables > 0) { - taosArrayPush(pGroupInfo->pGroupList, &group); - } else { - taosArrayDestroy(group); - } - - return TSDB_CODE_SUCCESS; -} -#endif static void* doFreeColumnInfoData(SArray* pColumnInfoData) { if (pColumnInfoData == NULL) { return NULL; @@ -4086,287 +3701,3 @@ void tsdbCleanupReadHandle(tsdbReaderT queryHandle) { taosMemoryFreeClear(pTsdbReadHandle); } - -#if 0 -void tsdbDestroyTableGroup(STableGroupInfo *pGroupList) { - assert(pGroupList != NULL); - - size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList); - - for(int32_t i = 0; i < numOfGroup; ++i) { - SArray* p = taosArrayGetP(pGroupList->pGroupList, i); - - size_t numOfTables = taosArrayGetSize(p); - for(int32_t j = 0; j < numOfTables; ++j) { - STable* pTable = taosArrayGetP(p, j); - if (pTable != NULL) { // in case of handling retrieve data from tsdb - tsdbUnRefTable(pTable); - } - //assert(pTable != NULL); - } - - taosArrayDestroy(p); - } - - taosHashCleanup(pGroupList->map); - taosArrayDestroy(pGroupList->pGroupList); - pGroupList->numOfTables = 0; -} - -static void applyFilterToSkipListNode(SSkipList *pSkipList, tExprNode *pExpr, SArray *pResult, SExprTraverseSupp *param) { - SSkipListIterator* iter = tSkipListCreateIter(pSkipList); - - // Scan each node in the skiplist by using iterator - while (tSkipListIterNext(iter)) { - SSkipListNode *pNode = tSkipListIterGet(iter); - if (exprTreeApplyFilter(pExpr, pNode, param)) { - taosArrayPush(pResult, &(SL_GET_NODE_DATA(pNode))); - } - } - - tSkipListDestroyIter(iter); -} - -typedef struct { - char* v; - int32_t optr; -} SEndPoint; - -typedef struct { 
- SEndPoint* start; - SEndPoint* end; -} SQueryCond; - -// todo check for malloc failure -static int32_t setQueryCond(tQueryInfo *queryColInfo, SQueryCond* pCond) { - int32_t optr = queryColInfo->optr; - - if (optr == TSDB_RELATION_GREATER || optr == TSDB_RELATION_GREATER_EQUAL || - optr == TSDB_RELATION_EQUAL || optr == TSDB_RELATION_NOT_EQUAL) { - pCond->start = taosMemoryCalloc(1, sizeof(SEndPoint)); - pCond->start->optr = queryColInfo->optr; - pCond->start->v = queryColInfo->q; - } else if (optr == TSDB_RELATION_LESS || optr == TSDB_RELATION_LESS_EQUAL) { - pCond->end = taosMemoryCalloc(1, sizeof(SEndPoint)); - pCond->end->optr = queryColInfo->optr; - pCond->end->v = queryColInfo->q; - } else if (optr == TSDB_RELATION_IN) { - pCond->start = taosMemoryCalloc(1, sizeof(SEndPoint)); - pCond->start->optr = queryColInfo->optr; - pCond->start->v = queryColInfo->q; - } else if (optr == TSDB_RELATION_LIKE) { - assert(0); - } else if (optr == TSDB_RELATION_MATCH) { - assert(0); - } else if (optr == TSDB_RELATION_NMATCH) { - assert(0); - } - - return TSDB_CODE_SUCCESS; -} - -static void queryIndexedColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArray* result) { - SSkipListIterator* iter = NULL; - - SQueryCond cond = {0}; - if (setQueryCond(pQueryInfo, &cond) != TSDB_CODE_SUCCESS) { - //todo handle error - } - - if (cond.start != NULL) { - iter = tSkipListCreateIterFromVal(pSkipList, (char*) cond.start->v, pSkipList->type, TSDB_ORDER_ASC); - } else { - iter = tSkipListCreateIterFromVal(pSkipList, (char*)(cond.end ? cond.end->v: NULL), pSkipList->type, TSDB_ORDER_DESC); - } - - if (cond.start != NULL) { - int32_t optr = cond.start->optr; - - if (optr == TSDB_RELATION_EQUAL) { // equals - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - - int32_t ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v); - if (ret != 0) { - break; - } - - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - } else if (optr == TSDB_RELATION_GREATER || optr == TSDB_RELATION_GREATER_EQUAL) { // greater equal - bool comp = true; - int32_t ret = 0; - - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - - if (comp) { - ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v); - assert(ret >= 0); - } - - if (ret == 0 && optr == TSDB_RELATION_GREATER) { - continue; - } else { - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - comp = false; - } - } - } else if (optr == TSDB_RELATION_NOT_EQUAL) { // not equal - bool comp = true; - - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - comp = comp && (pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v) == 0); - if (comp) { - continue; - } - - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - - tSkipListDestroyIter(iter); - - comp = true; - iter = tSkipListCreateIterFromVal(pSkipList, (char*) cond.start->v, pSkipList->type, TSDB_ORDER_DESC); - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - comp = comp && (pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v) == 0); - if (comp) { - continue; - } - - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - 
- } else if (optr == TSDB_RELATION_IN) { - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - - int32_t ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v); - if (ret != 0) { - break; - } - - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - - } else { - assert(0); - } - } else { - int32_t optr = cond.end ? cond.end->optr : TSDB_RELATION_INVALID; - if (optr == TSDB_RELATION_LESS || optr == TSDB_RELATION_LESS_EQUAL) { - bool comp = true; - int32_t ret = 0; - - while (tSkipListIterNext(iter)) { - SSkipListNode *pNode = tSkipListIterGet(iter); - - if (comp) { - ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.end->v); - assert(ret <= 0); - } - - if (ret == 0 && optr == TSDB_RELATION_LESS) { - continue; - } else { - STableKeyInfo info = {.pTable = (void *)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - comp = false; // no need to compare anymore - } - } - } else { - assert(pQueryInfo->optr == TSDB_RELATION_ISNULL || pQueryInfo->optr == TSDB_RELATION_NOTNULL); - - while (tSkipListIterNext(iter)) { - SSkipListNode *pNode = tSkipListIterGet(iter); - - bool isnull = isNull(SL_GET_NODE_KEY(pSkipList, pNode), pQueryInfo->sch.type); - if ((pQueryInfo->optr == TSDB_RELATION_ISNULL && isnull) || - (pQueryInfo->optr == TSDB_RELATION_NOTNULL && (!isnull))) { - STableKeyInfo info = {.pTable = (void *)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - } - } - } - - taosMemoryFree(cond.start); - taosMemoryFree(cond.end); - tSkipListDestroyIter(iter); -} - -static void queryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArray* res, __result_filter_fn_t filterFp) { - SSkipListIterator* iter = tSkipListCreateIter(pSkipList); - - while (tSkipListIterNext(iter)) { - bool addToResult = false; - - SSkipListNode *pNode = tSkipListIterGet(iter); - - char *pData = SL_GET_NODE_DATA(pNode); - tstr *name = (tstr*) tsdbGetTableName((void*) pData); - - // todo speed up by using hash - if (pQueryInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) { - if (pQueryInfo->optr == TSDB_RELATION_IN) { - addToResult = pQueryInfo->compare(name, pQueryInfo->q); - } else if (pQueryInfo->optr == TSDB_RELATION_LIKE || - pQueryInfo->optr == TSDB_RELATION_MATCH || - pQueryInfo->optr == TSDB_RELATION_NMATCH) { - addToResult = !pQueryInfo->compare(name, pQueryInfo->q); - } - } else { - addToResult = filterFp(pNode, pQueryInfo); - } - - if (addToResult) { - STableKeyInfo info = {.pTable = (void*)pData, .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(res, &info); - } - } - - tSkipListDestroyIter(iter); -} - -// Apply the filter expression to each node in the skiplist to acquire the qualified nodes in skip list -//void getTableListfromSkipList(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, SExprTraverseSupp *param) { -// if (pExpr == NULL) { -// return; -// } -// -// tExprNode *pLeft = pExpr->_node.pLeft; -// tExprNode *pRight = pExpr->_node.pRight; -// -// // column project -// if (pLeft->nodeType != TSQL_NODE_EXPR && pRight->nodeType != TSQL_NODE_EXPR) { -// assert(pLeft->nodeType == TSQL_NODE_COL && (pRight->nodeType == TSQL_NODE_VALUE || pRight->nodeType == TSQL_NODE_DUMMY)); -// -// param->setupInfoFn(pExpr, param->pExtInfo); -// -// tQueryInfo *pQueryInfo = pExpr->_node.info; -// if (pQueryInfo->indexed && (pQueryInfo->optr != TSDB_RELATION_LIKE -// && pQueryInfo->optr != 
TSDB_RELATION_MATCH && pQueryInfo->optr != TSDB_RELATION_NMATCH -// && pQueryInfo->optr != TSDB_RELATION_IN)) { -// queryIndexedColumn(pSkipList, pQueryInfo, result); -// } else { -// queryIndexlessColumn(pSkipList, pQueryInfo, result, param->nodeFilterFn); -// } -// -// return; -// } -// -// // The value of hasPK is always 0. -// uint8_t weight = pLeft->_node.hasPK + pRight->_node.hasPK; -// assert(weight == 0 && pSkipList != NULL && taosArrayGetSize(result) == 0); -// -// //apply the hierarchical filter expression to every node in skiplist to find the qualified nodes -// applyFilterToSkipListNode(pSkipList, pExpr, result, param); -//} -#endif diff --git a/source/dnode/vnode/src/tsdb/tsdbReadImpl.c b/source/dnode/vnode/src/tsdb/tsdbReadImpl.c index c1a1e7570ecdeba56c03f9b1b6fcb28894089610..d51521c41c954821163d17a1eddf4a4ddee7f5ad 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReadImpl.c +++ b/source/dnode/vnode/src/tsdb/tsdbReadImpl.c @@ -20,11 +20,11 @@ static void tsdbResetReadTable(SReadH *pReadh); static void tsdbResetReadFile(SReadH *pReadh); static int tsdbLoadBlockOffset(SReadH *pReadh, SBlock *pBlock); -static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDataCols); +static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDataCols, int8_t bitmapMode); static int tsdbCheckAndDecodeColumnData(SDataCol *pDataCol, void *content, int32_t len, int32_t bitmapLen, int8_t comp, int numOfRows, int numOfBitmaps, int maxPoints, char *buffer, int bufferSize); static int tsdbLoadBlockDataColsImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDataCols, const int16_t *colIds, - int numOfColIds); + int numOfColIds, int8_t bitmapMode); static int tsdbLoadColData(SReadH *pReadh, SDFile *pDFile, SBlock *pBlock, SBlockCol *pBlockCol, SDataCol *pDataCol); int tsdbInitReadH(SReadH *pReadh, STsdb *pRepo) { @@ -157,7 +157,7 @@ int tsdbLoadBlockIdx(SReadH *pReadh) { } int tsdbSetReadTable(SReadH *pReadh, STable *pTable) { - STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); + STSchema *pSchema = tsdbGetTableSchemaImpl(TSDB_READ_REPO(pReadh), pTable, false, false, -1); pReadh->pTable = pTable; @@ -252,6 +252,45 @@ static FORCE_INLINE void tsdbSwapDataCols(SDataCols *pDest, SDataCols *pSrc) { pSrc->cols = pCols; } +static void printTsdbLoadBlkData(SReadH *readh, SDataCols *pDCols, SBlock *pBlock, const char *tag, int32_t ln) { + printf("%s:%d:%" PRIi64 " ================\n", tag, ln, taosGetSelfPthreadId()); + if (pBlock) { + SDFile *pHeadf = TSDB_READ_HEAD_FILE(readh); + printf("%s:%d:%" PRIi64 ":%p:%d %s\n", tag, ln, taosGetSelfPthreadId(), pBlock, (int32_t)pBlock->len, + pHeadf->f.aname); + SDFile *pDFile = pBlock->last ? 
TSDB_READ_LAST_FILE(readh) : TSDB_READ_DATA_FILE(readh); + printf("%s:%d:%" PRIi64 ":%p:%d %s\n", tag, ln, taosGetSelfPthreadId(), pBlock, (int32_t)pBlock->len, + pDFile->f.aname); + } + SDataCol *pDCol = pDCols->cols + 0; + if (TSKEY_MIN == *(int64_t *)pDCol->pData) { + ASSERT(0); + } + + int rows = pDCols->numOfRows; + for (int r = 0; r < rows; ++r) { + if (pBlock) { + printf("%s:%d:%" PRIi64 ":%p:%d rows[%d][%d] ", tag, ln, taosGetSelfPthreadId(), pBlock, (int32_t)pBlock->len, + rows, r); + } else { + printf("%s:%d:%" PRIi64 ":%s rows[%d][%d] ", tag, ln, taosGetSelfPthreadId(), "=== merge === ", rows, r); + } + + int nDataCols = pDCols->numOfCols; + int j = 0; + SCellVal sVal = {0}; + while (j < nDataCols) { + SDataCol *pDataCol = pDCols->cols + j; + tdGetColDataOfRow(&sVal, pDataCol, r, pDCols->bitmapMode); + tdSCellValPrint(&sVal, pDataCol->type); + ++j; + } + printf("\n"); + } + + fflush(stdout); +} + int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo) { ASSERT(pBlock->numOfSubBlocks > 0); STsdbCfg *pCfg = REPO_CFG(pReadh->pRepo); @@ -266,14 +305,23 @@ int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo) { } } - if (tsdbLoadBlockDataImpl(pReadh, iBlock, pReadh->pDCols[0]) < 0) return -1; + if (tsdbLoadBlockDataImpl(pReadh, iBlock, pReadh->pDCols[0], TSDB_BITMODE_ONE_BIT) < 0) return -1; +#ifdef TD_DEBUG_PRINT_TSDB_LOAD_DCOLS + printTsdbLoadBlkData(pReadh, pReadh->pDCols[0], iBlock, __func__, __LINE__); +#endif for (int i = 1; i < pBlock->numOfSubBlocks; i++) { iBlock++; - if (tsdbLoadBlockDataImpl(pReadh, iBlock, pReadh->pDCols[1]) < 0) return -1; + if (tsdbLoadBlockDataImpl(pReadh, iBlock, pReadh->pDCols[1], TSDB_BITMODE_DEFAULT) < 0) return -1; +#ifdef TD_DEBUG_PRINT_TSDB_LOAD_DCOLS + printTsdbLoadBlkData(pReadh, pReadh->pDCols[1], iBlock, __func__, __LINE__); +#endif // TODO: use the real maxVersion to replace the UINT64_MAX to support Multi-Version if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL, TD_SUPPORT_UPDATE(update), TD_VER_MAX) < 0) return -1; +#ifdef TD_DEBUG_PRINT_TSDB_LOAD_DCOLS + printTsdbLoadBlkData(pReadh, pReadh->pDCols[0], iBlock, " === MERGE === ", __LINE__); +#endif } // if ((pBlock->numOfSubBlocks == 1) && (iBlock->hasDupKey)) { // TODO: use this line if (pBlock->numOfSubBlocks == 1) { @@ -285,6 +333,9 @@ int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo) { } tsdbSwapDataCols(pReadh->pDCols[0], pReadh->pDCols[1]); ASSERT(pReadh->pDCols[0]->bitmapMode != 0); +#ifdef TD_DEBUG_PRINT_TSDB_LOAD_DCOLS + printTsdbLoadBlkData(pReadh, pReadh->pDCols[0], iBlock, " === UPDATE FILTER === ", __LINE__); +#endif } ASSERT(pReadh->pDCols[0]->numOfRows <= pBlock->numOfRows); @@ -294,6 +345,53 @@ int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo) { return 0; } +static void printTsdbLoadBlkDataCols(SReadH *readh, SDataCols *pDCols, SBlock *pBlock, const int16_t *colIds, + int numOfColsIds, const char *tag, int32_t ln) { + printf("%s:%d:%" PRIi64 " ================\n", tag, ln, taosGetSelfPthreadId()); + if (pBlock) { + SDFile *pHeadf = TSDB_READ_HEAD_FILE(readh); + printf("%s:%d:%" PRIi64 ":%p:%d %s\n", tag, ln, taosGetSelfPthreadId(), pBlock, (int32_t)pBlock->len, + pHeadf->f.aname); + SDFile *pDFile = pBlock->last ? 
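Two things worth noting in the tsdbReadImpl.c hunks: the new printTsdbLoadBlkData / printTsdbLoadBlkDataCols dumps fire only when the file is compiled with TD_DEBUG_PRINT_TSDB_LOAD_DCOLS defined, and the bitmap mode is now pinned at each call site instead of being derived from tsdbIsSupBlock. Consolidated call pattern of the hunks above (error paths trimmed, sketch only):

// The base (super) block decodes in one-bit bitmap mode, every sub-block
// in the default two-bit mode, and the results are merged afterwards.
if (tsdbLoadBlockDataImpl(pReadh, iBlock, pReadh->pDCols[0], TSDB_BITMODE_ONE_BIT) < 0) return -1;
for (int i = 1; i < pBlock->numOfSubBlocks; i++) {
  iBlock++;
  if (tsdbLoadBlockDataImpl(pReadh, iBlock, pReadh->pDCols[1], TSDB_BITMODE_DEFAULT) < 0) return -1;
  if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL,
                      TD_SUPPORT_UPDATE(update), TD_VER_MAX) < 0) return -1;
}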
TSDB_READ_LAST_FILE(readh) : TSDB_READ_DATA_FILE(readh); + printf("%s:%d:%" PRIi64 ":%p:%d %s\n", tag, ln, taosGetSelfPthreadId(), pBlock, (int32_t)pBlock->len, + pDFile->f.aname); + } + + int rows = pDCols->numOfRows; + for (int r = 0; r < rows; ++r) { + if (pBlock) { + printf("%s:%d:%" PRIi64 ":%p:%d rows[%d][%d] ", tag, ln, taosGetSelfPthreadId(), pBlock, (int32_t)pBlock->len, + rows, r); + } else { + printf("%s:%d:%" PRIi64 ":%s rows[%d][%d] ", tag, ln, taosGetSelfPthreadId(), "=== merge === ", rows, r); + } + + int nDataCols = pDCols->numOfCols; + int j = 0, k = 0; + SCellVal sVal = {0}; + while (j < nDataCols) { + if (k >= numOfColsIds) break; + SDataCol *pDataCol = pDCols->cols + j; + int16_t colId1 = pDataCol->colId; + int16_t colId2 = *(colIds + k); + if (colId1 < colId2) { + ++j; + } else if (colId1 > colId2) { + ++k; // colId2 not exists in SDataCols + printf("NotExists "); + } else { + tdGetColDataOfRow(&sVal, pDataCol, r, pDCols->bitmapMode); + tdSCellValPrint(&sVal, pDataCol->type); + ++j; + ++k; + } + } + printf("\n"); + } + + fflush(stdout); +} + // TODO: filter by Multi-Version int tsdbLoadBlockDataCols(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo, const int16_t *colIds, int numOfColsIds, bool mergeBitmap) { @@ -309,14 +407,25 @@ int tsdbLoadBlockDataCols(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo, } } - if (tsdbLoadBlockDataColsImpl(pReadh, iBlock, pReadh->pDCols[0], colIds, numOfColsIds) < 0) return -1; + if (tsdbLoadBlockDataColsImpl(pReadh, iBlock, pReadh->pDCols[0], colIds, numOfColsIds, TSDB_BITMODE_ONE_BIT) < 0) + return -1; +#ifdef TD_DEBUG_PRINT_TSDB_LOAD_DCOLS + printTsdbLoadBlkDataCols(pReadh, pReadh->pDCols[0], iBlock, colIds, numOfColsIds, __func__, __LINE__); +#endif for (int i = 1; i < pBlock->numOfSubBlocks; i++) { iBlock++; - if (tsdbLoadBlockDataColsImpl(pReadh, iBlock, pReadh->pDCols[1], colIds, numOfColsIds) < 0) return -1; + if (tsdbLoadBlockDataColsImpl(pReadh, iBlock, pReadh->pDCols[1], colIds, numOfColsIds, TSDB_BITMODE_DEFAULT) < 0) + return -1; +#ifdef TD_DEBUG_PRINT_TSDB_LOAD_DCOLS + printTsdbLoadBlkDataCols(pReadh, pReadh->pDCols[1], iBlock, colIds, numOfColsIds, __func__, __LINE__); +#endif // TODO: use the real maxVersion to replace the UINT64_MAX to support Multi-Version if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL, TD_SUPPORT_UPDATE(update), TD_VER_MAX) < 0) return -1; +#ifdef TD_DEBUG_PRINT_TSDB_LOAD_DCOLS + printTsdbLoadBlkDataCols(pReadh, pReadh->pDCols[0], NULL, colIds, numOfColsIds, __func__, __LINE__); +#endif } // if ((pBlock->numOfSubBlocks == 1) && (iBlock->hasDupKey)) { // TODO: use this line if (pBlock->numOfSubBlocks == 1) { @@ -328,18 +437,23 @@ int tsdbLoadBlockDataCols(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo, } tsdbSwapDataCols(pReadh->pDCols[0], pReadh->pDCols[1]); ASSERT(pReadh->pDCols[0]->bitmapMode != 0); +#ifdef TD_DEBUG_PRINT_TSDB_LOAD_DCOLS + printTsdbLoadBlkDataCols(pReadh, pReadh->pDCols[0], NULL, colIds, numOfColsIds, + " === update filter === ", __LINE__); +#endif } if (mergeBitmap && !tdDataColsIsBitmapI(pReadh->pDCols[0])) { for (int i = 0; i < numOfColsIds; ++i) { SDataCol *pDataCol = pReadh->pDCols[0]->cols + i; if (pDataCol->len > 0 && pDataCol->bitmap) { - ASSERT(pDataCol->colId != PRIMARYKEY_TIMESTAMP_COL_ID); - ASSERT(pDataCol->pBitmap); tdMergeBitmap(pDataCol->pBitmap, pReadh->pDCols[0]->numOfRows, pDataCol->pBitmap); tdDataColsSetBitmapI(pReadh->pDCols[0]); } } +#ifdef TD_DEBUG_PRINT_TSDB_LOAD_DCOLS + 
printTsdbLoadBlkDataCols(pReadh, pReadh->pDCols[0], NULL, colIds, numOfColsIds, " === merge bitmap === ", __LINE__); +#endif } ASSERT(pReadh->pDCols[0]->numOfRows <= pBlock->numOfRows); @@ -543,16 +657,14 @@ static void tsdbResetReadFile(SReadH *pReadh) { tsdbCloseDFileSet(TSDB_READ_FSET(pReadh)); } -static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDataCols) { +static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDataCols, int8_t bitmapMode) { ASSERT(pBlock->numOfSubBlocks == 0 || pBlock->numOfSubBlocks == 1); SDFile *pDFile = (pBlock->last) ? TSDB_READ_LAST_FILE(pReadh) : TSDB_READ_DATA_FILE(pReadh); tdResetDataCols(pDataCols); - if (tsdbIsSupBlock(pBlock)) { - tdDataColsSetBitmapI(pDataCols); - } + pDataCols->bitmapMode = bitmapMode; if (tsdbMakeRoom((void **)(&TSDB_READ_BUF(pReadh)), pBlock->len) < 0) return -1; @@ -730,7 +842,7 @@ static int tsdbCheckAndDecodeColumnData(SDataCol *pDataCol, void *content, int32 } static int tsdbLoadBlockDataColsImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDataCols, const int16_t *colIds, - int numOfColIds) { + int numOfColIds, int8_t bitmapMode) { ASSERT(pBlock->numOfSubBlocks == 0 || pBlock->numOfSubBlocks == 1); ASSERT(colIds[0] == PRIMARYKEY_TIMESTAMP_COL_ID); @@ -739,9 +851,7 @@ static int tsdbLoadBlockDataColsImpl(SReadH *pReadh, SBlock *pBlock, SDataCols * tdResetDataCols(pDataCols); - if (tsdbIsSupBlock(pBlock)) { - tdDataColsSetBitmapI(pDataCols); - } + pDataCols->bitmapMode = bitmapMode; // If only load timestamp column, no need to load SBlockData part if (numOfColIds > 1 && tsdbLoadBlockOffset(pReadh, pBlock) < 0) return -1; diff --git a/source/dnode/vnode/src/tsdb/tsdbSma.c b/source/dnode/vnode/src/tsdb/tsdbSma.c index 18cf18dbad32bb1a780d098c0343c8c7894f700b..45b17a0180e4dabd411b01757c35e40910d62579 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSma.c +++ b/source/dnode/vnode/src/tsdb/tsdbSma.c @@ -2015,7 +2015,7 @@ static FORCE_INLINE int32_t tsdbExecuteRSmaImpl(STsdb *pTsdb, const void *pMsg, tsdbDebug("vgId:%d execute rsma %" PRIi8 " task for qTaskInfo:%p suid:%" PRIu64, REPO_ID(pTsdb), level, taskInfo, suid); - qSetStreamInput(taskInfo, pMsg, inputType); + qSetStreamInput(taskInfo, pMsg, inputType, false); while (1) { SSDataBlock *output = NULL; uint64_t ts; @@ -2040,7 +2040,7 @@ static FORCE_INLINE int32_t tsdbExecuteRSmaImpl(STsdb *pTsdb, const void *pMsg, blockDebugShowData(pResult); STsdb *sinkTsdb = (level == TSDB_RETENTION_L1 ? 
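/* Note: tsdbLoadBlockDataImpl/tsdbLoadBlockDataColsImpl above no longer
 * infer the bitmap width via tsdbIsSupBlock(); the caller now passes it
 * explicitly — TSDB_BITMODE_ONE_BIT for the first (super) block and
 * TSDB_BITMODE_DEFAULT for each sub-block. A reduced sketch of that
 * "caller picks the mode" shape; enum values and names are illustrative
 * only. */
#include <stdint.h>

typedef enum { DEMO_BITMODE_DEFAULT = 0, DEMO_BITMODE_ONE_BIT = 1 } DemoBitmode;
typedef struct DemoDCols { int8_t bitmapMode; } DemoDCols;

static int demoLoadImpl(DemoDCols *cols, DemoBitmode mode) {
  cols->bitmapMode = (int8_t)mode;  /* no guessing inside the loader anymore */
  /* ... decode rows using the requested bitmap width ... */
  return 0;
}

static int demoLoadBlock(DemoDCols *sup, DemoDCols *sub, int numOfSubBlocks) {
  if (demoLoadImpl(sup, DEMO_BITMODE_ONE_BIT) < 0) return -1;
  for (int i = 1; i < numOfSubBlocks; i++) {
    if (demoLoadImpl(sub, DEMO_BITMODE_DEFAULT) < 0) return -1;
    /* ... merge sub into sup, as tdMergeDataCols does above ... */
  }
  return 0;
}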
pTsdb->pVnode->pRSma1 : pTsdb->pVnode->pRSma2); SSubmitReq *pReq = NULL; - if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, TD_VID(pTsdb->pVnode), uid, suid) != 0) { + if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, TD_VID(pTsdb->pVnode), suid) != 0) { taosArrayDestroy(pResult); return TSDB_CODE_FAILED; } @@ -2083,7 +2083,7 @@ static int32_t tsdbExecuteRSma(STsdb *pTsdb, const void *pMsg, int32_t inputType } if (inputType == STREAM_DATA_TYPE_SUBMIT_BLOCK) { - // TODO: use the proper schema instead of 0, and cache STSchema in cache + // TODO: use the proper schema instead of 1, and cache STSchema in cache STSchema *pTSchema = metaGetTbTSchema(pTsdb->pVnode->pMeta, suid, 1); if (!pTSchema) { terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c new file mode 100644 index 0000000000000000000000000000000000000000..79989a55601b99e681c573cae1f5c26e38cd7421 --- /dev/null +++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "tsdb.h" + +struct STsdbSnapshotReader { + STsdb* pTsdb; + // TODO +}; + +int32_t tsdbSnapshotReaderOpen(STsdb* pTsdb, STsdbSnapshotReader** ppReader, int64_t sver, int64_t ever) { + // TODO + return 0; +} + +int32_t tsdbSnapshotReaderClose(STsdbSnapshotReader* pReader) { + // TODO + return 0; +} + +int32_t tsdbSnapshotRead(STsdbSnapshotReader* pReader, void** ppData, uint32_t* nData) { + // TODO + return 0; +} diff --git a/source/dnode/vnode/src/tsdb/tsdbWrite.c b/source/dnode/vnode/src/tsdb/tsdbWrite.c index a67f413ba7f2016797cbcfcf90b0efe094171904..aab4da26a37119c6e0044849b1494ede2f33e552 100644 --- a/source/dnode/vnode/src/tsdb/tsdbWrite.c +++ b/source/dnode/vnode/src/tsdb/tsdbWrite.c @@ -85,7 +85,7 @@ static FORCE_INLINE int tsdbCheckRowRange(STsdb *pTsdb, tb_uid_t uid, STSRow *ro return 0; } -int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, const SSubmitReq *pMsg) { +int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, SSubmitReq *pMsg) { ASSERT(pMsg != NULL); // STsdbMeta * pMeta = pTsdb->tsdbMeta; SSubmitMsgIter msgIter = {0}; @@ -150,7 +150,6 @@ int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, const SSubmitReq *pMsg) { return -1; } } - } if (terrno != TSDB_CODE_SUCCESS) return -1; diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c index 3b47b9025492a7ad53514750b9e88dbf01f52d49..9afe25fbf10e866805ba3a9f096071690584376f 100644 --- a/source/dnode/vnode/src/vnd/vnodeQuery.c +++ b/source/dnode/vnode/src/vnd/vnodeQuery.c @@ -64,7 +64,7 @@ int vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg) { if (mer1.me.type == TSDB_SUPER_TABLE) { strcpy(metaRsp.stbName, mer1.me.name); - schema = mer1.me.stbEntry.schema; + schema = mer1.me.stbEntry.schemaRow; schemaTag = mer1.me.stbEntry.schemaTag; metaRsp.suid = mer1.me.uid; } else if (mer1.me.type == TSDB_CHILD_TABLE) { @@ -73,10 +73,10 @@ int vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg) { 
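/* Note: the new tsdbSnapshot.c above only stubs the reader, but its intended
 * consumer loop can be read off the vnode-level wrapper later in this patch:
 * open over a version range, pull chunks until the read call reports
 * end-of-data, then close. A hedged sketch assuming the tsdb.h context of
 * that file; the TSDB_CODE_VND_READ_END convention is taken from
 * vnodeSnapshotRead below, not from these stubs. */
static int32_t demoDumpTsdbSnapshot(STsdb *pTsdb, int64_t sver, int64_t ever) {
  STsdbSnapshotReader *pReader = NULL;
  if (tsdbSnapshotReaderOpen(pTsdb, &pReader, sver, ever) < 0) return -1;

  void    *pData = NULL;
  uint32_t nData = 0;
  for (;;) {
    int32_t code = tsdbSnapshotRead(pReader, &pData, &nData);
    if (code != 0) break;            /* e.g. TSDB_CODE_VND_READ_END */
    /* ... ship (pData, nData) to the snapshot receiver ... */
  }
  return tsdbSnapshotReaderClose(pReader);
}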
strcpy(metaRsp.stbName, mer2.me.name); metaRsp.suid = mer2.me.uid; - schema = mer2.me.stbEntry.schema; + schema = mer2.me.stbEntry.schemaRow; schemaTag = mer2.me.stbEntry.schemaTag; } else if (mer1.me.type == TSDB_NORMAL_TABLE) { - schema = mer1.me.ntbEntry.schema; + schema = mer1.me.ntbEntry.schemaRow; } else { ASSERT(0); } @@ -84,7 +84,7 @@ int vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg) { metaRsp.numOfTags = schemaTag.nCols; metaRsp.numOfColumns = schema.nCols; metaRsp.precision = pVnode->config.tsdbCfg.precision; - metaRsp.sversion = schema.sver; + metaRsp.sversion = schema.version; metaRsp.pSchemas = (SSchema *)taosMemoryMalloc(sizeof(SSchema) * (metaRsp.numOfColumns + metaRsp.numOfTags)); memcpy(metaRsp.pSchemas, schema.pSchema, sizeof(SSchema) * schema.nCols); @@ -147,16 +147,10 @@ void vnodeGetInfo(SVnode *pVnode, const char **dbname, int32_t *vgId) { } // wrapper of tsdb read interface -tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STableGroupInfo *groupList, uint64_t qId, +tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STableListInfo* tableList, uint64_t qId, void *pMemRef) { #if 0 return tsdbQueryCacheLastT(pVnode->pTsdb, pCond, groupList, qId, pMemRef); #endif return 0; -} -int32_t tsdbGetTableGroupFromIdList(SVnode *pVnode, SArray *pTableIdList, STableGroupInfo *pGroupInfo) { -#if 0 - return tsdbGetTableGroupFromIdListT(pVnode->pTsdb, pTableIdList, pGroupInfo); -#endif - return 0; } \ No newline at end of file diff --git a/source/dnode/vnode/src/vnd/vnodeSnapshot.c b/source/dnode/vnode/src/vnd/vnodeSnapshot.c new file mode 100644 index 0000000000000000000000000000000000000000..baa8422307dd7785201bcc4b8b632bb3c05a37cb --- /dev/null +++ b/source/dnode/vnode/src/vnd/vnodeSnapshot.c @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "vnodeInt.h" + +struct SVSnapshotReader { + SVnode *pVnode; + int64_t sver; + int64_t ever; + int8_t isMetaEnd; + int8_t isTsdbEnd; + SMetaSnapshotReader *pMetaReader; + STsdbSnapshotReader *pTsdbReader; + void *pData; + int32_t nData; +}; + +int32_t vnodeSnapshotReaderOpen(SVnode *pVnode, SVSnapshotReader **ppReader, int64_t sver, int64_t ever) { + SVSnapshotReader *pReader = NULL; + + pReader = (SVSnapshotReader *)taosMemoryCalloc(1, sizeof(*pReader)); + if (pReader == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + pReader->pVnode = pVnode; + pReader->sver = sver; + pReader->ever = ever; + pReader->isMetaEnd = 0; + pReader->isTsdbEnd = 0; + + if (metaSnapshotReaderOpen(pVnode->pMeta, &pReader->pMetaReader, sver, ever) < 0) { + taosMemoryFree(pReader); + goto _err; + } + + if (tsdbSnapshotReaderOpen(pVnode->pTsdb, &pReader->pTsdbReader, sver, ever) < 0) { + metaSnapshotReaderClose(pReader->pMetaReader); + taosMemoryFree(pReader); + goto _err; + } + +_exit: + *ppReader = pReader; + return 0; + +_err: + *ppReader = NULL; + return -1; +} + +int32_t vnodeSnapshotReaderClose(SVSnapshotReader *pReader) { + if (pReader) { + vnodeFree(pReader->pData); + tsdbSnapshotReaderClose(pReader->pTsdbReader); + metaSnapshotReaderClose(pReader->pMetaReader); + taosMemoryFree(pReader); + } + return 0; +} + +int32_t vnodeSnapshotRead(SVSnapshotReader *pReader, const void **ppData, uint32_t *nData) { + int32_t code = 0; + + if (!pReader->isMetaEnd) { + code = metaSnapshotRead(pReader->pMetaReader, &pReader->pData, &pReader->nData); + if (code) { + if (code == TSDB_CODE_VND_READ_END) { + pReader->isMetaEnd = 1; + } else { + return code; + } + } else { + *ppData = pReader->pData; + *nData = pReader->nData; + return code; + } + } + + if (!pReader->isTsdbEnd) { + code = tsdbSnapshotRead(pReader->pTsdbReader, &pReader->pData, &pReader->nData); + if (code) { + if (code == TSDB_CODE_VND_READ_END) { + pReader->isTsdbEnd = 1; + } else { + return code; + } + } else { + *ppData = pReader->pData; + *nData = pReader->nData; + return code; + } + } + + code = TSDB_CODE_VND_READ_END; + return code; +} \ No newline at end of file diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 88cc97ddd525ed20d47c3c82879fad2d792953be..74b6982008da95f55448b699e5896b86fefeb1b1 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -24,26 +24,74 @@ static int vnodeProcessDropTbReq(SVnode *pVnode, int64_t version, void *pReq, in static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp); static int vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, void *pReq, int len, SRpcMsg *pRsp); -int vnodePreprocessWriteReqs(SVnode *pVnode, SArray *pMsgs, int64_t *version) { -#if 0 - SRpcMsg *pMsg; - SRpcMsg *pRpc; - - *version = pVnode->state.processed; - for (int i = 0; i < taosArrayGetSize(pMsgs); i++) { - pMsg = *(SRpcMsg **)taosArrayGet(pMsgs, i); - pRpc = pMsg; - - // set request version - if (walWrite(pVnode->pWal, pVnode->state.processed++, pRpc->msgType, pRpc->pCont, pRpc->contLen) < 0) { - vError("vnode:%d write wal error since %s", TD_VID(pVnode), terrstr()); - return -1; - } - } +int32_t vnodePreprocessReq(SVnode *pVnode, SRpcMsg *pMsg) { + SDecoder dc = {0}; - walFsync(pVnode->pWal, false); + switch (pMsg->msgType) { + case TDMT_VND_CREATE_TABLE: { + int64_t ctime = taosGetTimestampMs(); + int32_t nReqs; + + tDecoderInit(&dc, (uint8_t *)pMsg->pCont + sizeof(SMsgHead), 
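/* Note: vnodeSnapshotRead above chains two sources behind one cursor — meta
 * first, then tsdb — each with its own "ended" flag, and only when every
 * source has reported end-of-data does the wrapper itself return
 * TSDB_CODE_VND_READ_END. A compact standard-C sketch of that phase
 * chaining; all names are hypothetical. */
#define DEMO_READ_END 1  /* stand-in for TSDB_CODE_VND_READ_END */

typedef int (*DemoReadFn)(void **ppData, int *nData);  /* 0 = produced data */
typedef struct DemoSource { DemoReadFn read; int ended; } DemoSource;

static int demoChainedRead(DemoSource *src, int nSrc, void **ppData, int *nData) {
  for (int i = 0; i < nSrc; i++) {
    if (src[i].ended) continue;               /* this phase is already drained */
    int code = src[i].read(ppData, nData);
    if (code == 0) return 0;                  /* hand data out, stay on this phase */
    if (code != DEMO_READ_END) return code;   /* real error: surface it */
    src[i].ended = 1;                         /* fall through to the next phase */
  }
  return DEMO_READ_END;                       /* all phases exhausted */
}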
pMsg->contLen - sizeof(SMsgHead)); + tStartDecode(&dc); + + tDecodeI32v(&dc, &nReqs); + for (int32_t iReq = 0; iReq < nReqs; iReq++) { + tb_uid_t uid = tGenIdPI64(); + char *name = NULL; + tStartDecode(&dc); + + tDecodeI32v(&dc, NULL); + tDecodeCStr(&dc, &name); + *(int64_t *)(dc.data + dc.pos) = uid; + *(int64_t *)(dc.data + dc.pos + 8) = ctime; + + tEndDecode(&dc); + } + + tEndDecode(&dc); + tDecoderClear(&dc); + } break; + case TDMT_VND_SUBMIT: { + SSubmitMsgIter msgIter = {0}; + SSubmitReq *pSubmitReq = (SSubmitReq *)pMsg->pCont; + SSubmitBlk *pBlock = NULL; + int64_t ctime = taosGetTimestampMs(); + tb_uid_t uid; + + tInitSubmitMsgIter(pSubmitReq, &msgIter); + + for (;;) { + tGetSubmitMsgNext(&msgIter, &pBlock); + if (pBlock == NULL) break; + + if (msgIter.schemaLen > 0) { + char *name = NULL; + + tDecoderInit(&dc, pBlock->data, msgIter.schemaLen); + tStartDecode(&dc); + + tDecodeI32v(&dc, NULL); + tDecodeCStr(&dc, &name); + + uid = metaGetTableEntryUidByName(pVnode->pMeta, name); + if (uid == 0) { + uid = tGenIdPI64(); + } + *(int64_t *)(dc.data + dc.pos) = uid; + *(int64_t *)(dc.data + dc.pos + 8) = ctime; + pBlock->uid = htobe64(uid); + + tEndDecode(&dc); + tDecoderClear(&dc); + } + } + + } break; + default: + break; + } -#endif return 0; } @@ -106,13 +154,6 @@ int vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg pMsg->contLen - sizeof(SMsgHead)) < 0) { } } break; -#if 0 - case TDMT_VND_TASK_WRITE_EXEC: { - if (tqProcessTaskExec(pVnode->pTq, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), pMsg->contLen - sizeof(SMsgHead), - 0) < 0) { - } - } break; -#endif case TDMT_VND_ALTER_VNODE: break; default: @@ -150,9 +191,9 @@ int vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { SReadHandle handle = {.meta = pVnode->pMeta, .config = &pVnode->config, .vnode = pVnode, .pMsgCb = &pVnode->msgCb}; switch (pMsg->msgType) { case TDMT_VND_QUERY: - return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg); + return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg, 0); case TDMT_VND_QUERY_CONTINUE: - return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg); + return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg, 0); default: vError("unknown msg type:%d in query queue", pMsg->msgType); return TSDB_CODE_VND_APP_ERROR; @@ -165,17 +206,16 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { int32_t msgLen = pMsg->contLen - sizeof(SMsgHead); switch (pMsg->msgType) { case TDMT_VND_FETCH: - return qWorkerProcessFetchMsg(pVnode, pVnode->pQuery, pMsg); + return qWorkerProcessFetchMsg(pVnode, pVnode->pQuery, pMsg, 0); case TDMT_VND_FETCH_RSP: - return qWorkerProcessFetchRsp(pVnode, pVnode->pQuery, pMsg); - case TDMT_VND_RES_READY: - return qWorkerProcessReadyMsg(pVnode, pVnode->pQuery, pMsg); - case TDMT_VND_TASKS_STATUS: - return qWorkerProcessStatusMsg(pVnode, pVnode->pQuery, pMsg); + return qWorkerProcessFetchRsp(pVnode, pVnode->pQuery, pMsg, 0); case TDMT_VND_CANCEL_TASK: - return qWorkerProcessCancelMsg(pVnode, pVnode->pQuery, pMsg); + return qWorkerProcessCancelMsg(pVnode, pVnode->pQuery, pMsg, 0); case TDMT_VND_DROP_TASK: - return qWorkerProcessDropMsg(pVnode, pVnode->pQuery, pMsg); + return qWorkerProcessDropMsg(pVnode, pVnode->pQuery, pMsg, 0); + case TDMT_VND_QUERY_HEARTBEAT: + return qWorkerProcessHbMsg(pVnode, pVnode->pQuery, pMsg, 0); + case TDMT_VND_TABLE_META: return vnodeGetTableMeta(pVnode, pMsg); case TDMT_VND_CONSUME: @@ -194,20 +234,6 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { 
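/* Note: the preprocessing above patches uid and ctime directly into the
 * already-encoded request rather than decoding and re-encoding it: the
 * decoder is advanced just past the table name, then two int64 values are
 * stored at dc.pos and dc.pos + 8. That is only safe while the wire format
 * keeps uid and ctime as fixed 8-byte slots immediately after the decoded
 * prefix. A minimal sketch of the idea against a hand-rolled layout; using
 * memcpy also sidesteps the unaligned-store assumption the direct casts
 * above make. */
#include <stdint.h>
#include <string.h>

/* demo layout: [int32 flags][int64 uid][int64 ctime] */
static void demoPatchUidCtime(uint8_t *buf, int64_t uid, int64_t ctime) {
  size_t pos = sizeof(int32_t);                  /* cursor just past the prefix */
  memcpy(buf + pos, &uid, sizeof(uid));          /* overwrite the uid slot */
  memcpy(buf + pos + 8, &ctime, sizeof(ctime));  /* overwrite the ctime slot */
}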
return tqProcessTaskDispatchRsp(pVnode->pTq, pMsg); case TDMT_VND_TASK_RECOVER_RSP: return tqProcessTaskRecoverRsp(pVnode->pTq, pMsg); - -#if 0 - case TDMT_VND_TASK_PIPE_EXEC: - case TDMT_VND_TASK_MERGE_EXEC: - return tqProcessTaskExec(pVnode->pTq, msgstr, msgLen, 0); - case TDMT_VND_STREAM_TRIGGER:{ - // refactor, avoid double free - int code = tqProcessStreamTrigger(pVnode->pTq, pMsg->pCont, pMsg->contLen, 0); - pMsg->pCont = NULL; - return code; - } -#endif - case TDMT_VND_QUERY_HEARTBEAT: - return qWorkerProcessHbMsg(pVnode, pVnode->pQuery, pMsg); default: vError("unknown msg type:%d in fetch queue", pMsg->msgType); return TSDB_CODE_VND_APP_ERROR; @@ -234,7 +260,7 @@ int vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) { SMsgHead *pHead = pMsg->pCont; - char logBuf[512]; + char logBuf[512] = {0}; char *syncNodeStr = sync2SimpleStr(pVnode->sync); snprintf(logBuf, sizeof(logBuf), "==vnodeProcessSyncReq== msgType:%d, syncNode: %s", pMsg->msgType, syncNodeStr); syncRpcMsgLog2(logBuf, pMsg); @@ -595,16 +621,18 @@ static int vnodeDebugPrintSingleSubmitMsg(SMeta *pMeta, SSubmitBlk *pBlock, SSub STSchema *pSchema = NULL; tb_uid_t suid = 0; STSRow *row = NULL; + int32_t rv = -1; tInitSubmitBlkIter(msgIter, pBlock, &blkIter); if (blkIter.row == NULL) return 0; - if (!pSchema || (suid != msgIter->suid)) { + if (!pSchema || (suid != msgIter->suid) || rv != TD_ROW_SVER(blkIter.row)) { if (pSchema) { taosMemoryFreeClear(pSchema); } - pSchema = metaGetTbTSchema(pMeta, msgIter->suid, 1); // TODO: use the real schema + pSchema = metaGetTbTSchema(pMeta, msgIter->suid, TD_ROW_SVER(blkIter.row)); // TODO: use the real schema if (pSchema) { suid = msgIter->suid; + rv = TD_ROW_SVER(blkIter.row); } } if (!pSchema) { @@ -650,6 +678,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in int32_t nRows; int32_t tsize, ret; SEncoder encoder = {0}; + SArray *newTbUids = NULL; terrno = TSDB_CODE_SUCCESS; pRsp->code = 0; @@ -670,12 +699,13 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in } submitRsp.pArray = taosArrayInit(pSubmitReq->numOfBlocks, sizeof(SSubmitBlkRsp)); + newTbUids = taosArrayInit(pSubmitReq->numOfBlocks, sizeof(int64_t)); if (!submitRsp.pArray) { pRsp->code = TSDB_CODE_OUT_OF_MEMORY; goto _exit; } - for (int i = 0;;) { + for (;;) { tGetSubmitMsgNext(&msgIter, &pBlock); if (pBlock == NULL) break; @@ -699,6 +729,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in goto _exit; } } + taosArrayPush(newTbUids, &createTbReq.uid); submitBlkRsp.uid = createTbReq.uid; submitBlkRsp.tblFName = taosMemoryMalloc(strlen(pVnode->config.dbname) + strlen(createTbReq.name) + 2); @@ -726,8 +757,10 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in submitRsp.affectedRows += submitBlkRsp.affectedRows; taosArrayPush(submitRsp.pArray, &submitBlkRsp); } + tqUpdateTbUidList(pVnode->pTq, newTbUids, true); _exit: + taosArrayDestroy(newTbUids); tEncodeSize(tEncodeSSubmitRsp, &submitRsp, tsize, ret); pRsp->pCont = rpcMallocCont(tsize); pRsp->contLen = tsize; diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index 8659c418070cdccf4dc9c3164d36f5548199f030..d1468778531d08cb8f2744c7e953b452a28df810 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -56,7 +56,13 @@ void vnodeSyncStart(SVnode *pVnode) { void vnodeSyncClose(SVnode *pVnode) { syncStop(pVnode->sync); } -int32_t vnodeSyncEqMsg(const 
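/* Note: vnodeProcessSubmitReq above now collects the uid of every table
 * auto-created during one submit into newTbUids and calls tqUpdateTbUidList
 * once after the loop, instead of notifying per table. Standard-C sketch of
 * that batch-then-notify shape; all names are hypothetical. */
#include <stdint.h>
#include <stdlib.h>

typedef struct DemoUids { int64_t *p; int n, cap; } DemoUids;

static int demoPushUid(DemoUids *l, int64_t uid) {
  if (l->n == l->cap) {
    int cap = l->cap ? l->cap * 2 : 8;
    int64_t *q = (int64_t *)realloc(l->p, (size_t)cap * sizeof(int64_t));
    if (q == NULL) return -1;
    l->p = q; l->cap = cap;
  }
  l->p[l->n++] = uid;
  return 0;
}

static void demoNotifyTq(const DemoUids *l) { (void)l; /* one tqUpdateTbUidList-style call */ }

static int demoProcessSubmit(const int64_t *created, int nCreated) {
  DemoUids uids = {0};
  for (int i = 0; i < nCreated; i++)
    if (demoPushUid(&uids, created[i]) < 0) { free(uids.p); return -1; }
  demoNotifyTq(&uids);  /* single notification for the whole batch */
  free(uids.p);
  return 0;
}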
SMsgCb *msgcb, SRpcMsg *pMsg) { return tmsgPutToQueue(msgcb, SYNC_QUEUE, pMsg); } +int32_t vnodeSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) { + int32_t code = tmsgPutToQueue(msgcb, SYNC_QUEUE, pMsg); + if (code != 0) { + rpcFreeCont(pMsg->pCont); + } + return code; +} int32_t vnodeSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) { return tmsgSendReq(pEpSet, pMsg); } @@ -74,7 +80,7 @@ void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) } if (cbMeta.index > beginIndex) { - char logBuf[256]; + char logBuf[256] = {0}; snprintf( logBuf, sizeof(logBuf), "==callback== ==CommitCb== execute, pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s, beginIndex :%ld\n", @@ -109,7 +115,7 @@ void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) tmsgPutToQueue(&(pVnode->msgCb), APPLY_QUEUE, &applyMsg); } else { - char logBuf[256]; + char logBuf[256] = {0}; snprintf(logBuf, sizeof(logBuf), "==callback== ==CommitCb== do not execute, pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s, " "beginIndex :%ld\n", @@ -120,7 +126,7 @@ void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) } void vnodeSyncPreCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) { - char logBuf[256]; + char logBuf[256] = {0}; snprintf(logBuf, sizeof(logBuf), "==callback== ==PreCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n", pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state)); @@ -128,7 +134,7 @@ void vnodeSyncPreCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMet } void vnodeSyncRollBackMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) { - char logBuf[256]; + char logBuf[256] = {0}; snprintf(logBuf, sizeof(logBuf), "==callback== ==RollBackCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n", pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state)); syncRpcMsgLog2(logBuf, (SRpcMsg *)pMsg); @@ -136,10 +142,14 @@ void vnodeSyncRollBackMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) { SSyncFSM *pFsm = taosMemoryCalloc(1, sizeof(SSyncFSM)); + memset(pFsm, 0, sizeof(*pFsm)); pFsm->data = pVnode; pFsm->FpCommitCb = vnodeSyncCommitMsg; pFsm->FpPreCommitCb = vnodeSyncPreCommitMsg; pFsm->FpRollBackCb = vnodeSyncRollBackMsg; pFsm->FpGetSnapshot = vnodeSyncGetSnapshot; + pFsm->FpRestoreFinishCb = NULL; + pFsm->FpReConfigCb = NULL; + return pFsm; } \ No newline at end of file diff --git a/source/dnode/vnode/src/vnd/vnodeUtil.c b/source/dnode/vnode/src/vnd/vnodeUtil.c new file mode 100644 index 0000000000000000000000000000000000000000..cd942099bc8924fde06ea912b0eecdfbe72603cb --- /dev/null +++ b/source/dnode/vnode/src/vnd/vnodeUtil.c @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
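/* Note: the reworked vnodeSyncEqMsg above fixes a leak: tmsgPutToQueue takes
 * ownership of pMsg->pCont only on success, so a failed enqueue must free
 * the payload at the producer. Generic sketch of that ownership rule with
 * hypothetical names. */
#include <stdlib.h>

typedef struct DemoMsg { void *pCont; } DemoMsg;

static int demoEnqueue(DemoMsg *m) { (void)m; return -1; /* pretend the queue is full */ }

static int demoPut(DemoMsg *m) {
  int code = demoEnqueue(m);
  if (code != 0) {
    free(m->pCont);  /* queue never took ownership: release it here */
    m->pCont = NULL;
  }
  return code;
}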
+ */ + +#include "vnd.h" + +int32_t vnodeRealloc(void** pp, int32_t size) { + uint8_t* p = NULL; + int32_t csize = 0; + + if (*pp) { + p = (uint8_t*)(*pp) - sizeof(int32_t); + csize = *(int32_t*)p; + } + + if (csize >= size) { + return 0; + } + + p = (uint8_t*)taosMemoryRealloc(p, size); + if (p == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + *(int32_t*)p = size; + *pp = p + sizeof(int32_t); + + return 0; +} + +void vnodeFree(void* p) { + if (p) { + taosMemoryFree(((uint8_t*)p) - sizeof(int32_t)); + } +} \ No newline at end of file diff --git a/source/dnode/vnode/test/tqMetaTest.cpp b/source/dnode/vnode/test/tqMetaTest.cpp deleted file mode 100644 index 627dbc6f18122e39cde2e243fd564ad622324cd2..0000000000000000000000000000000000000000 --- a/source/dnode/vnode/test/tqMetaTest.cpp +++ /dev/null @@ -1,279 +0,0 @@ -#include -#include -#include -#include - -#include "tqMetaStore.h" - -struct Foo { - int32_t a; -}; - -int FooSerializer(const void* pObj, STqSerializedHead** ppHead) { - Foo* foo = (Foo*)pObj; - if ((*ppHead) == NULL || (*ppHead)->ssize < sizeof(STqSerializedHead) + sizeof(int32_t)) { - *ppHead = (STqSerializedHead*)taosMemoryRealloc(*ppHead, sizeof(STqSerializedHead) + sizeof(int32_t)); - (*ppHead)->ssize = sizeof(STqSerializedHead) + sizeof(int32_t); - } - *(int32_t*)(*ppHead)->content = foo->a; - return (*ppHead)->ssize; -} - -const void* FooDeserializer(const STqSerializedHead* pHead, void** ppObj) { - if (*ppObj == NULL) { - *ppObj = taosMemoryRealloc(*ppObj, sizeof(int32_t)); - } - Foo* pFoo = *(Foo**)ppObj; - pFoo->a = *(int32_t*)pHead->content; - return NULL; -} - -void FooDeleter(void* pObj) { taosMemoryFree(pObj); } - -class TqMetaUpdateAppendTest : public ::testing::Test { - protected: - void SetUp() override { - taosRemoveDir(pathName); - pMeta = tqStoreOpen(pathName, FooSerializer, FooDeserializer, FooDeleter, TQ_UPDATE_APPEND); - ASSERT(pMeta); - } - - void TearDown() override { tqStoreClose(pMeta); } - - STqMetaStore* pMeta; - const char* pathName = "/tmp/tq_test"; -}; - -TEST_F(TqMetaUpdateAppendTest, copyPutTest) { - Foo foo; - foo.a = 3; - tqHandleCopyPut(pMeta, 1, &foo, sizeof(Foo)); - - Foo* pFoo = (Foo*)tqHandleGet(pMeta, 1); - EXPECT_EQ(pFoo == NULL, true); - - tqHandleCommit(pMeta, 1); - pFoo = (Foo*)tqHandleGet(pMeta, 1); - EXPECT_EQ(pFoo->a, 3); -} - -TEST_F(TqMetaUpdateAppendTest, persistTest) { - Foo* pFoo = (Foo*)taosMemoryMalloc(sizeof(Foo)); - pFoo->a = 2; - tqHandleMovePut(pMeta, 1, pFoo); - Foo* pBar = (Foo*)tqHandleGet(pMeta, 1); - EXPECT_EQ(pBar == NULL, true); - tqHandleCommit(pMeta, 1); - pBar = (Foo*)tqHandleGet(pMeta, 1); - EXPECT_EQ(pBar->a, pFoo->a); - pBar = (Foo*)tqHandleGet(pMeta, 2); - EXPECT_EQ(pBar == NULL, true); - - tqStoreClose(pMeta); - pMeta = tqStoreOpen(pathName, FooSerializer, FooDeserializer, FooDeleter, TQ_UPDATE_APPEND); - ASSERT(pMeta); - - pBar = (Foo*)tqHandleGet(pMeta, 1); - ASSERT_EQ(pBar != NULL, true); - EXPECT_EQ(pBar->a, 2); - - pBar = (Foo*)tqHandleGet(pMeta, 2); - EXPECT_EQ(pBar == NULL, true); -} - -TEST_F(TqMetaUpdateAppendTest, uncommittedTest) { - Foo* pFoo = (Foo*)taosMemoryMalloc(sizeof(Foo)); - pFoo->a = 3; - tqHandleMovePut(pMeta, 1, pFoo); - - pFoo = (Foo*)tqHandleGet(pMeta, 1); - EXPECT_EQ(pFoo == NULL, true); -} - -TEST_F(TqMetaUpdateAppendTest, abortTest) { - Foo* pFoo = (Foo*)taosMemoryMalloc(sizeof(Foo)); - pFoo->a = 3; - tqHandleMovePut(pMeta, 1, pFoo); - - pFoo = (Foo*)tqHandleGet(pMeta, 1); - EXPECT_EQ(pFoo == NULL, true); - - tqHandleAbort(pMeta, 1); - pFoo = (Foo*)tqHandleGet(pMeta, 1); - 
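/* Note: vnodeRealloc/vnodeFree above hide an int32_t capacity header just in
 * front of the pointer handed to the caller, so a later grow request can be
 * skipped when the recorded capacity already covers it, and vnodeFree steps
 * back over the header before freeing. A small usage sketch, assuming the
 * declarations that vnd.h exposes for these two helpers; error handling is
 * trimmed to the minimum. */
static void demoUseVnodeRealloc(void) {
  void *buf = NULL;
  if (vnodeRealloc(&buf, 128) != 0) return;  /* allocates, records capacity 128 */
  if (vnodeRealloc(&buf, 64) != 0) return;   /* no-op: stored 128 >= 64 */
  if (vnodeRealloc(&buf, 256) != 0) return;  /* grows, header updated to 256 */
  vnodeFree(buf);                            /* frees from the header start, not buf */
}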
EXPECT_EQ(pFoo == NULL, true); -} - -TEST_F(TqMetaUpdateAppendTest, deleteTest) { - Foo* pFoo = (Foo*)taosMemoryMalloc(sizeof(Foo)); - pFoo->a = 3; - tqHandleMovePut(pMeta, 1, pFoo); - - pFoo = (Foo*)tqHandleGet(pMeta, 1); - EXPECT_EQ(pFoo == NULL, true); - - tqHandleCommit(pMeta, 1); - - pFoo = (Foo*)tqHandleGet(pMeta, 1); - ASSERT_EQ(pFoo != NULL, true); - EXPECT_EQ(pFoo->a, 3); - - tqHandleDel(pMeta, 1); - pFoo = (Foo*)tqHandleGet(pMeta, 1); - ASSERT_EQ(pFoo != NULL, true); - EXPECT_EQ(pFoo->a, 3); - - tqHandleCommit(pMeta, 1); - pFoo = (Foo*)tqHandleGet(pMeta, 1); - EXPECT_EQ(pFoo == NULL, true); - - tqStoreClose(pMeta); - pMeta = tqStoreOpen(pathName, FooSerializer, FooDeserializer, FooDeleter, TQ_UPDATE_APPEND); - ASSERT(pMeta); - - pFoo = (Foo*)tqHandleGet(pMeta, 1); - EXPECT_EQ(pFoo == NULL, true); -} - -TEST_F(TqMetaUpdateAppendTest, intxnPersist) { - Foo* pFoo = (Foo*)taosMemoryMalloc(sizeof(Foo)); - pFoo->a = 3; - tqHandleMovePut(pMeta, 1, pFoo); - tqHandleCommit(pMeta, 1); - - Foo* pBar = (Foo*)taosMemoryMalloc(sizeof(Foo)); - pBar->a = 4; - tqHandleMovePut(pMeta, 1, pBar); - - Foo* pFoo1 = (Foo*)tqHandleGet(pMeta, 1); - EXPECT_EQ(pFoo1->a, 3); - - tqStoreClose(pMeta); - pMeta = tqStoreOpen(pathName, FooSerializer, FooDeserializer, FooDeleter, TQ_UPDATE_APPEND); - ASSERT(pMeta); - - pFoo1 = (Foo*)tqHandleGet(pMeta, 1); - EXPECT_EQ(pFoo1->a, 3); - - tqHandleCommit(pMeta, 1); - - pFoo1 = (Foo*)tqHandleGet(pMeta, 1); - EXPECT_EQ(pFoo1->a, 4); - - tqStoreClose(pMeta); - pMeta = tqStoreOpen(pathName, FooSerializer, FooDeserializer, FooDeleter, TQ_UPDATE_APPEND); - ASSERT(pMeta); - - pFoo1 = (Foo*)tqHandleGet(pMeta, 1); - EXPECT_EQ(pFoo1->a, 4); -} - -TEST_F(TqMetaUpdateAppendTest, multiplePage) { - taosSeedRand(0); - std::vector v; - for (int i = 0; i < 1000; i++) { - v.push_back(taosRand()); - Foo foo; - foo.a = v[i]; - tqHandleCopyPut(pMeta, i, &foo, sizeof(Foo)); - } - for (int i = 0; i < 500; i++) { - tqHandleCommit(pMeta, i); - Foo* pFoo = (Foo*)tqHandleGet(pMeta, i); - ASSERT_EQ(pFoo != NULL, true) << " at idx " << i << "\n"; - EXPECT_EQ(pFoo->a, v[i]); - } - - tqStoreClose(pMeta); - pMeta = tqStoreOpen(pathName, FooSerializer, FooDeserializer, FooDeleter, TQ_UPDATE_APPEND); - ASSERT(pMeta); - - for (int i = 500; i < 1000; i++) { - tqHandleCommit(pMeta, i); - Foo* pFoo = (Foo*)tqHandleGet(pMeta, i); - ASSERT_EQ(pFoo != NULL, true) << " at idx " << i << "\n"; - EXPECT_EQ(pFoo->a, v[i]); - } - - for (int i = 0; i < 1000; i++) { - Foo* pFoo = (Foo*)tqHandleGet(pMeta, i); - ASSERT_EQ(pFoo != NULL, true) << " at idx " << i << "\n"; - EXPECT_EQ(pFoo->a, v[i]); - } -} - -TEST_F(TqMetaUpdateAppendTest, multipleRewrite) { - taosSeedRand(0); - std::vector v; - for (int i = 0; i < 1000; i++) { - v.push_back(taosRand()); - Foo foo; - foo.a = v[i]; - tqHandleCopyPut(pMeta, i, &foo, sizeof(Foo)); - } - - for (int i = 0; i < 500; i++) { - tqHandleCommit(pMeta, i); - v[i] = taosRand(); - Foo foo; - foo.a = v[i]; - tqHandleCopyPut(pMeta, i, &foo, sizeof(Foo)); - } - - for (int i = 500; i < 1000; i++) { - v[i] = taosRand(); - Foo foo; - foo.a = v[i]; - tqHandleCopyPut(pMeta, i, &foo, sizeof(Foo)); - } - - for (int i = 0; i < 1000; i++) { - tqHandleCommit(pMeta, i); - } - - tqStoreClose(pMeta); - pMeta = tqStoreOpen(pathName, FooSerializer, FooDeserializer, FooDeleter, TQ_UPDATE_APPEND); - ASSERT(pMeta); - - for (int i = 500; i < 1000; i++) { - v[i] = taosRand(); - Foo foo; - foo.a = v[i]; - tqHandleCopyPut(pMeta, i, &foo, sizeof(Foo)); - tqHandleCommit(pMeta, i); - } - - for (int i = 0; i < 
1000; i++) { - Foo* pFoo = (Foo*)tqHandleGet(pMeta, i); - ASSERT_EQ(pFoo != NULL, true) << " at idx " << i << "\n"; - EXPECT_EQ(pFoo->a, v[i]); - } -} - -TEST_F(TqMetaUpdateAppendTest, dupCommit) { - taosSeedRand(0); - std::vector v; - for (int i = 0; i < 1000; i++) { - v.push_back(taosRand()); - Foo foo; - foo.a = v[i]; - tqHandleCopyPut(pMeta, i, &foo, sizeof(Foo)); - } - - for (int i = 0; i < 1000; i++) { - int ret = tqHandleCommit(pMeta, i); - EXPECT_EQ(ret, 0); - ret = tqHandleCommit(pMeta, i); - EXPECT_EQ(ret, -1); - } - - for (int i = 0; i < 1000; i++) { - int ret = tqHandleCommit(pMeta, i); - EXPECT_EQ(ret, -1); - } - - for (int i = 0; i < 1000; i++) { - Foo* pFoo = (Foo*)tqHandleGet(pMeta, i); - ASSERT_EQ(pFoo != NULL, true) << " at idx " << i << "\n"; - EXPECT_EQ(pFoo->a, v[i]); - } -} diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h index 857c7088523a919671c05c15eef75debdbffc0de..230949ab7fbf696b050d3e10d7e76c858612e52b 100644 --- a/source/libs/catalog/inc/catalogInt.h +++ b/source/libs/catalog/inc/catalogInt.h @@ -49,15 +49,28 @@ enum { }; enum { - CTG_ACT_UPDATE_VG = 0, - CTG_ACT_UPDATE_TBL, - CTG_ACT_REMOVE_DB, - CTG_ACT_REMOVE_STB, - CTG_ACT_REMOVE_TBL, - CTG_ACT_UPDATE_USER, - CTG_ACT_MAX + CTG_OP_UPDATE_VGROUP = 0, + CTG_OP_UPDATE_TB_META, + CTG_OP_DROP_DB_CACHE, + CTG_OP_DROP_STB_META, + CTG_OP_DROP_TB_META, + CTG_OP_UPDATE_USER, + CTG_OP_UPDATE_VG_EPSET, + CTG_OP_MAX }; +typedef enum { + CTG_TASK_GET_QNODE = 0, + CTG_TASK_GET_DB_VGROUP, + CTG_TASK_GET_DB_CFG, + CTG_TASK_GET_DB_INFO, + CTG_TASK_GET_TB_META, + CTG_TASK_GET_TB_HASH, + CTG_TASK_GET_INDEX, + CTG_TASK_GET_UDF, + CTG_TASK_GET_USER, +} CTG_TASK_TYPE; + typedef struct SCtgDebug { bool lockEnable; bool cacheEnable; @@ -66,6 +79,47 @@ typedef struct SCtgDebug { uint32_t showCachePeriodSec; } SCtgDebug; +typedef struct SCtgTbCacheInfo { + bool inCache; + uint64_t dbId; + uint64_t suid; + int32_t tbType; +} SCtgTbCacheInfo; + +typedef struct SCtgTbMetaCtx { + SCtgTbCacheInfo tbInfo; + SName* pName; + int32_t flag; +} SCtgTbMetaCtx; + +typedef struct SCtgDbVgCtx { + char dbFName[TSDB_DB_FNAME_LEN]; +} SCtgDbVgCtx; + +typedef struct SCtgDbCfgCtx { + char dbFName[TSDB_DB_FNAME_LEN]; +} SCtgDbCfgCtx; + +typedef struct SCtgDbInfoCtx { + char dbFName[TSDB_DB_FNAME_LEN]; +} SCtgDbInfoCtx; + +typedef struct SCtgTbHashCtx { + char dbFName[TSDB_DB_FNAME_LEN]; + SName* pName; +} SCtgTbHashCtx; + +typedef struct SCtgIndexCtx { + char indexFName[TSDB_INDEX_FNAME_LEN]; +} SCtgIndexCtx; + +typedef struct SCtgUdfCtx { + char udfName[TSDB_FUNC_NAME_LEN]; +} SCtgUdfCtx; + +typedef struct SCtgUserCtx { + SUserAuthInfo user; +} SCtgUserCtx; typedef struct SCtgTbMetaCache { SRWLatch stbLock; @@ -113,6 +167,56 @@ typedef struct SCatalog { SCtgRentMgmt stbRent; } SCatalog; +typedef struct SCtgJob { + int64_t refId; + SArray* pTasks; + int32_t taskDone; + SMetaData jobRes; + int32_t rspCode; + + uint64_t queryId; + SCatalog* pCtg; + void* pTrans; + SEpSet pMgmtEps; + void* userParam; + catalogCallback userFp; + int32_t tbMetaNum; + int32_t tbHashNum; + int32_t dbVgNum; + int32_t udfNum; + int32_t qnodeNum; + int32_t dbCfgNum; + int32_t indexNum; + int32_t userNum; + int32_t dbInfoNum; +} SCtgJob; + +typedef struct SCtgMsgCtx { + int32_t reqType; + void* lastOut; + void* out; + char* target; +} SCtgMsgCtx; + +typedef struct SCtgTask { + CTG_TASK_TYPE type; + int32_t taskId; + SCtgJob *pJob; + void* taskCtx; + SCtgMsgCtx msgCtx; + void* res; +} SCtgTask; + +typedef int32_t (*ctgLanchTaskFp)(SCtgTask*); 
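/* Note: the ctgLanchTaskFp typedef above and its two companions just below
 * give every CTG_TASK_TYPE its own launch / response-handling /
 * result-dumping entry, so async dispatch becomes a plain array index on the
 * task type (the gCtgAsyncFps table declared further down). Reduced sketch
 * of that vtable-by-enum pattern with hypothetical task types. */
#include <stdio.h>

typedef enum { DEMO_TASK_A = 0, DEMO_TASK_B, DEMO_TASK_MAX } DemoTaskType;
typedef struct DemoTask { DemoTaskType type; } DemoTask;

static int demoLaunchA(DemoTask *t) { (void)t; printf("launch A\n"); return 0; }
static int demoLaunchB(DemoTask *t) { (void)t; printf("launch B\n"); return 0; }

typedef struct DemoAsyncFps { int (*launchFp)(DemoTask *); } DemoAsyncFps;

static DemoAsyncFps gDemoFps[DEMO_TASK_MAX] = { {demoLaunchA}, {demoLaunchB} };

static int demoLaunchTask(DemoTask *t) { return gDemoFps[t->type].launchFp(t); }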
+typedef int32_t (*ctgHandleTaskMsgRspFp)(SCtgTask*, int32_t, const SDataBuf *, int32_t); +typedef int32_t (*ctgDumpTaskResFp)(SCtgTask*); + +typedef struct SCtgAsyncFps { + ctgLanchTaskFp launchFp; + ctgHandleTaskMsgRspFp handleRspFp; + ctgDumpTaskResFp dumpResFp; +} SCtgAsyncFps; + typedef struct SCtgApiStat { #ifdef WINDOWS @@ -188,16 +292,22 @@ typedef struct SCtgUpdateUserMsg { SGetUserAuthRsp userAuth; } SCtgUpdateUserMsg; +typedef struct SCtgUpdateEpsetMsg { + SCatalog* pCtg; + char dbFName[TSDB_DB_FNAME_LEN]; + int32_t vgId; + SEpSet epSet; +} SCtgUpdateEpsetMsg; -typedef struct SCtgMetaAction { - int32_t act; +typedef struct SCtgCacheOperation { + int32_t opId; void *data; bool syncReq; uint64_t seqId; -} SCtgMetaAction; +} SCtgCacheOperation; typedef struct SCtgQNode { - SCtgMetaAction action; + SCtgCacheOperation op; struct SCtgQNode *next; } SCtgQNode; @@ -214,6 +324,7 @@ typedef struct SCtgQueue { typedef struct SCatalogMgmt { bool exit; + int32_t jobPool; SRWLatch lock; SCtgQueue queue; TdThread updateThread; @@ -223,13 +334,13 @@ typedef struct SCatalogMgmt { } SCatalogMgmt; typedef uint32_t (*tableNameHashFp)(const char *, uint32_t); -typedef int32_t (*ctgActFunc)(SCtgMetaAction *); +typedef int32_t (*ctgOpFunc)(SCtgCacheOperation *); -typedef struct SCtgAction { - int32_t actId; +typedef struct SCtgOperation { + int32_t opId; char name[32]; - ctgActFunc func; -} SCtgAction; + ctgOpFunc func; +} SCtgOperation; #define CTG_QUEUE_ADD() atomic_add_fetch_64(&gCtgMgmt.queue.qRemainNum, 1) #define CTG_QUEUE_SUB() atomic_sub_fetch_64(&gCtgMgmt.queue.qRemainNum, 1) @@ -327,10 +438,82 @@ typedef struct SCtgAction { #define CTG_API_LEAVE(c) do { int32_t __code = c; CTG_UNLOCK(CTG_READ, &gCtgMgmt.lock); CTG_API_DEBUG("CTG API leave %s", __FUNCTION__); CTG_RET(__code); } while (0) #define CTG_API_ENTER() do { CTG_API_DEBUG("CTG API enter %s", __FUNCTION__); CTG_LOCK(CTG_READ, &gCtgMgmt.lock); if (atomic_load_8((int8_t*)&gCtgMgmt.exit)) { CTG_API_LEAVE(TSDB_CODE_CTG_OUT_OF_SERVICE); } } while (0) - -extern void ctgdShowTableMeta(SCatalog* pCtg, const char *tbName, STableMeta* p); -extern void ctgdShowClusterCache(SCatalog* pCtg); -extern int32_t ctgdShowCacheInfo(void); +#define CTG_PARAMS SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps +#define CTG_PARAMS_LIST() pCtg, pTrans, pMgmtEps + +void ctgdShowTableMeta(SCatalog* pCtg, const char *tbName, STableMeta* p); +void ctgdShowClusterCache(SCatalog* pCtg); +int32_t ctgdShowCacheInfo(void); + +int32_t ctgRemoveTbMetaFromCache(SCatalog* pCtg, SName* pTableName, bool syncReq); +int32_t ctgGetTbMetaFromCache(CTG_PARAMS, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta); + +int32_t ctgOpUpdateVgroup(SCtgCacheOperation *action); +int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *action); +int32_t ctgOpDropDbCache(SCtgCacheOperation *action); +int32_t ctgOpDropStbMeta(SCtgCacheOperation *action); +int32_t ctgOpDropTbMeta(SCtgCacheOperation *action); +int32_t ctgOpUpdateUser(SCtgCacheOperation *action); +int32_t ctgOpUpdateEpset(SCtgCacheOperation *operation); +int32_t ctgAcquireVgInfoFromCache(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache); +void ctgReleaseDBCache(SCatalog *pCtg, SCtgDBCache *dbCache); +void ctgReleaseVgInfo(SCtgDBCache *dbCache); +int32_t ctgAcquireVgInfoFromCache(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache); +int32_t ctgTbMetaExistInCache(SCatalog* pCtg, char *dbFName, char* tbName, int32_t *exist); +int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta); +int32_t 
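/* Note: the rename from SCtgMetaAction/SCtgAction to
 * SCtgCacheOperation/SCtgOperation above keeps the original shape: every
 * cache mutation is queued as { opId, data, syncReq, seqId }, and one
 * updater thread pops entries and dispatches through an opId-indexed table
 * of { opId, name, func }, keeping cache writes serialized. Sketch of the
 * dispatch step only; the handlers are illustrative, and the name field is
 * there for logging as in the real table. */
typedef struct DemoOp { int opId; void *data; } DemoOp;
typedef int (*DemoOpFunc)(DemoOp *);
typedef struct DemoOpEntry { int opId; const char *name; DemoOpFunc func; } DemoOpEntry;

static int demoOpUpdateVg(DemoOp *op) { (void)op; return 0; }
static int demoOpDropDb(DemoOp *op)   { (void)op; return 0; }

static DemoOpEntry gDemoOps[] = {
  {0, "update vgInfo", demoOpUpdateVg},
  {1, "drop DB cache", demoOpDropDb},
};

static int demoDispatchOp(DemoOp *op) {
  return gDemoOps[op->opId].func(op);  /* updater thread: log by name, run handler */
}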
ctgReadTbVerFromCache(SCatalog *pCtg, const SName *pTableName, int32_t *sver, int32_t *tver, int32_t *tbType, uint64_t *suid, char *stbName); +int32_t ctgChkAuthFromCache(SCatalog* pCtg, const char* user, const char* dbFName, AUTH_TYPE type, bool *inCache, bool *pass); +int32_t ctgDropDbCacheEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId); +int32_t ctgDropStbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid, bool syncReq); +int32_t ctgDropTbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *tbName, bool syncReq); +int32_t ctgUpdateVgroupEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, SDBVgInfo* dbInfo, bool syncReq); +int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool syncReq); +int32_t ctgUpdateUserEnqueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncReq); +int32_t ctgUpdateVgEpsetEnqueue(SCatalog* pCtg, char *dbFName, int32_t vgId, SEpSet* pEpSet); +int32_t ctgMetaRentInit(SCtgRentMgmt *mgmt, uint32_t rentSec, int8_t type); +int32_t ctgMetaRentAdd(SCtgRentMgmt *mgmt, void *meta, int64_t id, int32_t size); +int32_t ctgMetaRentGet(SCtgRentMgmt *mgmt, void **res, uint32_t *num, int32_t size); +int32_t ctgUpdateTbMetaToCache(SCatalog* pCtg, STableMetaOutput* pOut, bool syncReq); +int32_t ctgStartUpdateThread(); +int32_t ctgRelaunchGetTbMetaTask(SCtgTask *pTask); + + + +int32_t ctgProcessRspMsg(void* out, int32_t reqType, char* msg, int32_t msgSize, int32_t rspCode, char* target); +int32_t ctgGetDBVgInfoFromMnode(CTG_PARAMS, SBuildUseDBInput *input, SUseDbOutput *out, SCtgTask* pTask); +int32_t ctgGetQnodeListFromMnode(CTG_PARAMS, SArray *out, SCtgTask* pTask); +int32_t ctgGetDBCfgFromMnode(CTG_PARAMS, const char *dbFName, SDbCfgInfo *out, SCtgTask* pTask); +int32_t ctgGetIndexInfoFromMnode(CTG_PARAMS, const char *indexName, SIndexInfo *out, SCtgTask* pTask); +int32_t ctgGetUdfInfoFromMnode(CTG_PARAMS, const char *funcName, SFuncInfo *out, SCtgTask* pTask); +int32_t ctgGetUserDbAuthFromMnode(CTG_PARAMS, const char *user, SGetUserAuthRsp *out, SCtgTask* pTask); +int32_t ctgGetTbMetaFromMnodeImpl(CTG_PARAMS, char *dbFName, char* tbName, STableMetaOutput* out, SCtgTask* pTask); +int32_t ctgGetTbMetaFromMnode(CTG_PARAMS, const SName* pTableName, STableMetaOutput* out, SCtgTask* pTask); +int32_t ctgGetTbMetaFromVnode(CTG_PARAMS, const SName* pTableName, SVgroupInfo *vgroupInfo, STableMetaOutput* out, SCtgTask* pTask); + +int32_t ctgInitJob(CTG_PARAMS, SCtgJob** job, uint64_t reqId, const SCatalogReq* pReq, catalogCallback fp, void* param); +int32_t ctgLaunchJob(SCtgJob *pJob); +int32_t ctgMakeAsyncRes(SCtgJob *pJob); + +int32_t ctgCloneVgInfo(SDBVgInfo *src, SDBVgInfo **dst); +int32_t ctgCloneMetaOutput(STableMetaOutput *output, STableMetaOutput **pOutput); +int32_t ctgGenerateVgList(SCatalog *pCtg, SHashObj *vgHash, SArray** pList); +void ctgFreeJob(void* job); +void ctgFreeHandle(SCatalog* pCtg); +void ctgFreeVgInfo(SDBVgInfo *vgInfo); +int32_t ctgGetVgInfoFromHashValue(SCatalog *pCtg, SDBVgInfo *dbInfo, const SName *pTableName, SVgroupInfo *pVgroup); +void ctgResetTbMetaTask(SCtgTask* pTask); +void ctgFreeDbCache(SCtgDBCache *dbCache); +int32_t ctgStbVersionSortCompare(const void* key1, const void* key2); +int32_t ctgDbVgVersionSortCompare(const void* key1, const void* key2); +int32_t ctgStbVersionSearchCompare(const void* key1, const void* key2); +int32_t ctgDbVgVersionSearchCompare(const void* key1, const void* key2); +void ctgFreeSTableMetaOutput(STableMetaOutput* 
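/* Note: CTG_PARAMS / CTG_PARAMS_LIST above factor the (pCtg, pTrans,
 * pMgmtEps) triple shared by nearly every remote-fetch declaration into two
 * macros — one expands inside parameter lists, the other at forwarding call
 * sites. Minimal sketch of the same trick with hypothetical parameters. */
#include <stdio.h>

#define DEMO_PARAMS        int ctx, const char *ep
#define DEMO_PARAMS_LIST() ctx, ep

static int demoFetchMeta(DEMO_PARAMS) { printf("fetch via %s (ctx %d)\n", ep, ctx); return 0; }

static int demoFetchAll(DEMO_PARAMS) {
  return demoFetchMeta(DEMO_PARAMS_LIST());  /* forwards the shared triple */
}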
pOutput); +int32_t ctgUpdateMsgCtx(SCtgMsgCtx* pCtx, int32_t reqType, void* out, char* target); + + +extern SCatalogMgmt gCtgMgmt; +extern SCtgDebug gCTGDebug; +extern SCtgAsyncFps gCtgAsyncFps[]; #ifdef __cplusplus } diff --git a/source/libs/executor/inc/indexoperator.h b/source/libs/catalog/inc/ctgRemote.h similarity index 66% rename from source/libs/executor/inc/indexoperator.h rename to source/libs/catalog/inc/ctgRemote.h index d033c63ef8c9e6adb2685182103ed918299e1d66..cd88863c1b9b306b264cd05986f0a7e3a6b814d8 100644 --- a/source/libs/executor/inc/indexoperator.h +++ b/source/libs/catalog/inc/ctgRemote.h @@ -13,23 +13,23 @@ * along with this program. If not, see . */ -#ifndef _INDEX_OPERATOR_H -#define _INDEX_OPERATOR_H +#ifndef _TD_CATALOG_REMOTE_H_ +#define _TD_CATALOG_REMOTE_H_ #ifdef __cplusplus extern "C" { #endif -#include "nodes.h" -#include "tglobal.h" -typedef enum { SFLT_NOT_INDEX, SFLT_COARSE_INDEX, SFLT_ACCURATE_INDEX } SIdxFltStatus; +typedef struct SCtgTaskCallbackParam { + uint64_t queryId; + int64_t refId; + uint64_t taskId; + int32_t reqType; +} SCtgTaskCallbackParam; -SIdxFltStatus idxGetFltStatus(SNode *pFilterNode); -// construct tag filter operator later -int32_t doFilterTag(const SNode *pFilterNode, SArray *result); #ifdef __cplusplus } #endif -#endif /*INDEX_OPERATOR_*/ +#endif /*_TD_CATALOG_REMOTE_H_*/ diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c index 2d4355191ef73d801cc7718f45ea8191b1aa7232..6519440dad3c7711057f3fd1e203b328ed263a52 100644 --- a/source/libs/catalog/src/catalog.c +++ b/source/libs/catalog/src/catalog.c @@ -13,1795 +13,66 @@ * along with this program. If not, see . */ -#include "catalogInt.h" +#include "trpc.h" #include "query.h" -#include "systable.h" #include "tname.h" -#include "trpc.h" - -int32_t ctgActUpdateVg(SCtgMetaAction *action); -int32_t ctgActUpdateTbl(SCtgMetaAction *action); -int32_t ctgActRemoveDB(SCtgMetaAction *action); -int32_t ctgActRemoveStb(SCtgMetaAction *action); -int32_t ctgActRemoveTbl(SCtgMetaAction *action); -int32_t ctgActUpdateUser(SCtgMetaAction *action); - -extern SCtgDebug gCTGDebug; -SCatalogMgmt gCtgMgmt = {0}; -SCtgAction gCtgAction[CTG_ACT_MAX] = { - {CTG_ACT_UPDATE_VG, "update vgInfo", ctgActUpdateVg}, {CTG_ACT_UPDATE_TBL, "update tbMeta", ctgActUpdateTbl}, - {CTG_ACT_REMOVE_DB, "remove DB", ctgActRemoveDB}, {CTG_ACT_REMOVE_STB, "remove stbMeta", ctgActRemoveStb}, - {CTG_ACT_REMOVE_TBL, "remove tbMeta", ctgActRemoveTbl}, {CTG_ACT_UPDATE_USER, "update user", ctgActUpdateUser}}; - -void ctgFreeMetaRent(SCtgRentMgmt *mgmt) { - if (NULL == mgmt->slots) { - return; - } - - for (int32_t i = 0; i < mgmt->slotNum; ++i) { - SCtgRentSlot *slot = &mgmt->slots[i]; - if (slot->meta) { - taosArrayDestroy(slot->meta); - slot->meta = NULL; - } - } - - taosMemoryFreeClear(mgmt->slots); -} - -void ctgFreeTableMetaCache(SCtgTbMetaCache *cache) { - CTG_LOCK(CTG_WRITE, &cache->stbLock); - if (cache->stbCache) { - int32_t stblNum = taosHashGetSize(cache->stbCache); - taosHashCleanup(cache->stbCache); - cache->stbCache = NULL; - CTG_CACHE_STAT_SUB(stblNum, stblNum); - } - CTG_UNLOCK(CTG_WRITE, &cache->stbLock); - - CTG_LOCK(CTG_WRITE, &cache->metaLock); - if (cache->metaCache) { - int32_t tblNum = taosHashGetSize(cache->metaCache); - taosHashCleanup(cache->metaCache); - cache->metaCache = NULL; - CTG_CACHE_STAT_SUB(tblNum, tblNum); - } - CTG_UNLOCK(CTG_WRITE, &cache->metaLock); -} - -void ctgFreeVgInfo(SDBVgInfo *vgInfo) { - if (NULL == vgInfo) { - return; - } - - if (vgInfo->vgHash) { - 
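/* Note: SCtgTaskCallbackParam in the renamed ctgRemote.h carries just enough
 * identity (queryId, job refId, taskId, reqType) for an async RPC response
 * to find its way back: look the job up by refId, then index its task list
 * by taskId. A hedged sketch of that lookup shape only; the real reference
 * management is not shown in this header, so demoAcquireJob is a stand-in. */
#include <stdint.h>
#include <stddef.h>

typedef struct DemoCbParam { uint64_t queryId; int64_t refId; uint64_t taskId; int32_t reqType; } DemoCbParam;
typedef struct DemoRspTask { uint64_t taskId; int32_t lastReqType; } DemoRspTask;
typedef struct DemoRspJob  { DemoRspTask tasks[4]; int nTasks; } DemoRspJob;

static DemoRspJob gDemoRspJob;  /* stand-in for a refId-keyed job table */
static DemoRspJob *demoAcquireJob(int64_t refId) { (void)refId; return &gDemoRspJob; }

static int demoHandleRsp(DemoCbParam *p, const void *payload, int len) {
  (void)payload; (void)len;
  DemoRspJob *job = demoAcquireJob(p->refId);
  if (job == NULL) return -1;                  /* job already finished/freed */
  for (int i = 0; i < job->nTasks; i++) {
    if (job->tasks[i].taskId == p->taskId) {
      job->tasks[i].lastReqType = p->reqType;  /* route the payload to this task */
      break;
    }
  }
  return 0;
}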
taosHashCleanup(vgInfo->vgHash); - vgInfo->vgHash = NULL; - } - - taosMemoryFreeClear(vgInfo); -} - -void ctgFreeDbCache(SCtgDBCache *dbCache) { - if (NULL == dbCache) { - return; - } - - CTG_LOCK(CTG_WRITE, &dbCache->vgLock); - ctgFreeVgInfo(dbCache->vgInfo); - CTG_UNLOCK(CTG_WRITE, &dbCache->vgLock); - - ctgFreeTableMetaCache(&dbCache->tbCache); -} - -void ctgFreeSCtgUserAuth(SCtgUserAuth *userCache) { - taosHashCleanup(userCache->createdDbs); - taosHashCleanup(userCache->readDbs); - taosHashCleanup(userCache->writeDbs); -} - -void ctgFreeHandle(SCatalog *pCtg) { - ctgFreeMetaRent(&pCtg->dbRent); - ctgFreeMetaRent(&pCtg->stbRent); - - if (pCtg->dbCache) { - int32_t dbNum = taosHashGetSize(pCtg->dbCache); - - void *pIter = taosHashIterate(pCtg->dbCache, NULL); - while (pIter) { - SCtgDBCache *dbCache = pIter; - - atomic_store_8(&dbCache->deleted, 1); - - ctgFreeDbCache(dbCache); - - pIter = taosHashIterate(pCtg->dbCache, pIter); - } - - taosHashCleanup(pCtg->dbCache); - - CTG_CACHE_STAT_SUB(dbNum, dbNum); - } - - if (pCtg->userCache) { - int32_t userNum = taosHashGetSize(pCtg->userCache); - - void *pIter = taosHashIterate(pCtg->userCache, NULL); - while (pIter) { - SCtgUserAuth *userCache = pIter; - - ctgFreeSCtgUserAuth(userCache); - - pIter = taosHashIterate(pCtg->userCache, pIter); - } - - taosHashCleanup(pCtg->userCache); - - CTG_CACHE_STAT_SUB(userNum, userNum); - } - - taosMemoryFree(pCtg); -} - -void ctgWaitAction(SCtgMetaAction *action) { - while (true) { - tsem_wait(&gCtgMgmt.queue.rspSem); - - if (atomic_load_8((int8_t *)&gCtgMgmt.exit)) { - tsem_post(&gCtgMgmt.queue.rspSem); - break; - } - - if (gCtgMgmt.queue.seqDone >= action->seqId) { - break; - } - - tsem_post(&gCtgMgmt.queue.rspSem); - sched_yield(); - } -} - -void ctgPopAction(SCtgMetaAction **action) { - SCtgQNode *orig = gCtgMgmt.queue.head; - - SCtgQNode *node = gCtgMgmt.queue.head->next; - gCtgMgmt.queue.head = gCtgMgmt.queue.head->next; - - CTG_QUEUE_SUB(); - - taosMemoryFreeClear(orig); - - *action = &node->action; -} - -int32_t ctgPushAction(SCatalog *pCtg, SCtgMetaAction *action) { - SCtgQNode *node = taosMemoryCalloc(1, sizeof(SCtgQNode)); - if (NULL == node) { - qError("calloc %d failed", (int32_t)sizeof(SCtgQNode)); - CTG_RET(TSDB_CODE_CTG_MEM_ERROR); - } - - action->seqId = atomic_add_fetch_64(&gCtgMgmt.queue.seqId, 1); - - node->action = *action; - - CTG_LOCK(CTG_WRITE, &gCtgMgmt.queue.qlock); - gCtgMgmt.queue.tail->next = node; - gCtgMgmt.queue.tail = node; - CTG_UNLOCK(CTG_WRITE, &gCtgMgmt.queue.qlock); - - CTG_QUEUE_ADD(); - CTG_RUNTIME_STAT_ADD(qNum, 1); - - tsem_post(&gCtgMgmt.queue.reqSem); - - ctgDebug("action [%s] added into queue", gCtgAction[action->act].name); - - if (action->syncReq) { - ctgWaitAction(action); - } - - return TSDB_CODE_SUCCESS; -} - -int32_t ctgPushRmDBMsgInQueue(SCatalog *pCtg, const char *dbFName, int64_t dbId) { - int32_t code = 0; - SCtgMetaAction action = {.act = CTG_ACT_REMOVE_DB}; - SCtgRemoveDBMsg *msg = taosMemoryMalloc(sizeof(SCtgRemoveDBMsg)); - if (NULL == msg) { - ctgError("malloc %d failed", (int32_t)sizeof(SCtgRemoveDBMsg)); - CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); - } - - char *p = strchr(dbFName, '.'); - if (p && CTG_IS_SYS_DBNAME(p + 1)) { - dbFName = p + 1; - } - - msg->pCtg = pCtg; - strncpy(msg->dbFName, dbFName, sizeof(msg->dbFName)); - msg->dbId = dbId; - - action.data = msg; - - CTG_ERR_JRET(ctgPushAction(pCtg, &action)); - - return TSDB_CODE_SUCCESS; - -_return: - - taosMemoryFreeClear(action.data); - CTG_RET(code); -} - -int32_t 
ctgPushRmStbMsgInQueue(SCatalog *pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid, - bool syncReq) { - int32_t code = 0; - SCtgMetaAction action = {.act = CTG_ACT_REMOVE_STB, .syncReq = syncReq}; - SCtgRemoveStbMsg *msg = taosMemoryMalloc(sizeof(SCtgRemoveStbMsg)); - if (NULL == msg) { - ctgError("malloc %d failed", (int32_t)sizeof(SCtgRemoveStbMsg)); - CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); - } - - msg->pCtg = pCtg; - strncpy(msg->dbFName, dbFName, sizeof(msg->dbFName)); - strncpy(msg->stbName, stbName, sizeof(msg->stbName)); - msg->dbId = dbId; - msg->suid = suid; - - action.data = msg; - - CTG_ERR_JRET(ctgPushAction(pCtg, &action)); - - return TSDB_CODE_SUCCESS; - -_return: - - taosMemoryFreeClear(action.data); - CTG_RET(code); -} - -int32_t ctgPushRmTblMsgInQueue(SCatalog *pCtg, const char *dbFName, int64_t dbId, const char *tbName, bool syncReq) { - int32_t code = 0; - SCtgMetaAction action = {.act = CTG_ACT_REMOVE_TBL, .syncReq = syncReq}; - SCtgRemoveTblMsg *msg = taosMemoryMalloc(sizeof(SCtgRemoveTblMsg)); - if (NULL == msg) { - ctgError("malloc %d failed", (int32_t)sizeof(SCtgRemoveTblMsg)); - CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); - } - - msg->pCtg = pCtg; - strncpy(msg->dbFName, dbFName, sizeof(msg->dbFName)); - strncpy(msg->tbName, tbName, sizeof(msg->tbName)); - msg->dbId = dbId; - - action.data = msg; - - CTG_ERR_JRET(ctgPushAction(pCtg, &action)); - - return TSDB_CODE_SUCCESS; - -_return: - - taosMemoryFreeClear(action.data); - CTG_RET(code); -} - -int32_t ctgPushUpdateVgMsgInQueue(SCatalog *pCtg, const char *dbFName, int64_t dbId, SDBVgInfo *dbInfo, bool syncReq) { - int32_t code = 0; - SCtgMetaAction action = {.act = CTG_ACT_UPDATE_VG, .syncReq = syncReq}; - SCtgUpdateVgMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateVgMsg)); - if (NULL == msg) { - ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateVgMsg)); - ctgFreeVgInfo(dbInfo); - CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); - } - - char *p = strchr(dbFName, '.'); - if (p && CTG_IS_SYS_DBNAME(p + 1)) { - dbFName = p + 1; - } - - strncpy(msg->dbFName, dbFName, sizeof(msg->dbFName)); - msg->pCtg = pCtg; - msg->dbId = dbId; - msg->dbInfo = dbInfo; - - action.data = msg; - - CTG_ERR_JRET(ctgPushAction(pCtg, &action)); - - return TSDB_CODE_SUCCESS; - -_return: - - ctgFreeVgInfo(dbInfo); - taosMemoryFreeClear(action.data); - CTG_RET(code); -} - -int32_t ctgPushUpdateTblMsgInQueue(SCatalog *pCtg, STableMetaOutput *output, bool syncReq) { - int32_t code = 0; - SCtgMetaAction action = {.act = CTG_ACT_UPDATE_TBL, .syncReq = syncReq}; - SCtgUpdateTblMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateTblMsg)); - if (NULL == msg) { - ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateTblMsg)); - CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); - } - - char *p = strchr(output->dbFName, '.'); - if (p && CTG_IS_SYS_DBNAME(p + 1)) { - memmove(output->dbFName, p + 1, strlen(p + 1)); - } - - msg->pCtg = pCtg; - msg->output = output; - - action.data = msg; - - CTG_ERR_JRET(ctgPushAction(pCtg, &action)); - - return TSDB_CODE_SUCCESS; - -_return: - - taosMemoryFreeClear(msg); - - CTG_RET(code); -} - -int32_t ctgPushUpdateUserMsgInQueue(SCatalog *pCtg, SGetUserAuthRsp *pAuth, bool syncReq) { - int32_t code = 0; - SCtgMetaAction action = {.act = CTG_ACT_UPDATE_USER, .syncReq = syncReq}; - SCtgUpdateUserMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateUserMsg)); - if (NULL == msg) { - ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateUserMsg)); - CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); - } - - msg->pCtg = pCtg; - msg->userAuth = *pAuth; - 
- action.data = msg; - - CTG_ERR_JRET(ctgPushAction(pCtg, &action)); - - return TSDB_CODE_SUCCESS; - -_return: - - tFreeSGetUserAuthRsp(pAuth); - taosMemoryFreeClear(msg); - - CTG_RET(code); -} - -int32_t ctgAcquireVgInfo(SCatalog *pCtg, SCtgDBCache *dbCache, bool *inCache) { - CTG_LOCK(CTG_READ, &dbCache->vgLock); - - if (dbCache->deleted) { - CTG_UNLOCK(CTG_READ, &dbCache->vgLock); - - ctgDebug("db is dropping, dbId:%" PRIx64, dbCache->dbId); - - *inCache = false; - return TSDB_CODE_SUCCESS; - } - - if (NULL == dbCache->vgInfo) { - CTG_UNLOCK(CTG_READ, &dbCache->vgLock); - - *inCache = false; - ctgDebug("db vgInfo is empty, dbId:%" PRIx64, dbCache->dbId); - return TSDB_CODE_SUCCESS; - } - - *inCache = true; - - return TSDB_CODE_SUCCESS; -} - -int32_t ctgWAcquireVgInfo(SCatalog *pCtg, SCtgDBCache *dbCache) { - CTG_LOCK(CTG_WRITE, &dbCache->vgLock); - - if (dbCache->deleted) { - ctgDebug("db is dropping, dbId:%" PRIx64, dbCache->dbId); - CTG_UNLOCK(CTG_WRITE, &dbCache->vgLock); - CTG_ERR_RET(TSDB_CODE_CTG_DB_DROPPED); - } - - return TSDB_CODE_SUCCESS; -} - -void ctgReleaseDBCache(SCatalog *pCtg, SCtgDBCache *dbCache) { taosHashRelease(pCtg->dbCache, dbCache); } - -void ctgReleaseVgInfo(SCtgDBCache *dbCache) { CTG_UNLOCK(CTG_READ, &dbCache->vgLock); } - -void ctgWReleaseVgInfo(SCtgDBCache *dbCache) { CTG_UNLOCK(CTG_WRITE, &dbCache->vgLock); } - -int32_t ctgAcquireDBCacheImpl(SCatalog *pCtg, const char *dbFName, SCtgDBCache **pCache, bool acquire) { - char *p = strchr(dbFName, '.'); - if (p && CTG_IS_SYS_DBNAME(p + 1)) { - dbFName = p + 1; - } - - SCtgDBCache *dbCache = NULL; - if (acquire) { - dbCache = (SCtgDBCache *)taosHashAcquire(pCtg->dbCache, dbFName, strlen(dbFName)); - } else { - dbCache = (SCtgDBCache *)taosHashGet(pCtg->dbCache, dbFName, strlen(dbFName)); - } - - if (NULL == dbCache) { - *pCache = NULL; - ctgDebug("db not in cache, dbFName:%s", dbFName); - return TSDB_CODE_SUCCESS; - } - - if (dbCache->deleted) { - if (acquire) { - ctgReleaseDBCache(pCtg, dbCache); - } - - *pCache = NULL; - ctgDebug("db is removing from cache, dbFName:%s", dbFName); - return TSDB_CODE_SUCCESS; - } - - *pCache = dbCache; - - return TSDB_CODE_SUCCESS; -} - -int32_t ctgAcquireDBCache(SCatalog *pCtg, const char *dbFName, SCtgDBCache **pCache) { - CTG_RET(ctgAcquireDBCacheImpl(pCtg, dbFName, pCache, true)); -} - -int32_t ctgGetDBCache(SCatalog *pCtg, const char *dbFName, SCtgDBCache **pCache) { - CTG_RET(ctgAcquireDBCacheImpl(pCtg, dbFName, pCache, false)); -} - -int32_t ctgAcquireVgInfoFromCache(SCatalog *pCtg, const char *dbFName, SCtgDBCache **pCache, bool *inCache) { - SCtgDBCache *dbCache = NULL; - - if (NULL == pCtg->dbCache) { - ctgDebug("empty db cache, dbFName:%s", dbFName); - goto _return; - } - - ctgAcquireDBCache(pCtg, dbFName, &dbCache); - if (NULL == dbCache) { - ctgDebug("db %s not in cache", dbFName); - goto _return; - } - - ctgAcquireVgInfo(pCtg, dbCache, inCache); - if (!(*inCache)) { - ctgDebug("vgInfo of db %s not in cache", dbFName); - goto _return; - } - - *pCache = dbCache; - *inCache = true; - - CTG_CACHE_STAT_ADD(vgHitNum, 1); - - ctgDebug("Got db vgInfo from cache, dbFName:%s", dbFName); - - return TSDB_CODE_SUCCESS; - -_return: - - if (dbCache) { - ctgReleaseDBCache(pCtg, dbCache); - } - - *pCache = NULL; - *inCache = false; - - CTG_CACHE_STAT_ADD(vgMissNum, 1); - - return TSDB_CODE_SUCCESS; -} - -int32_t ctgGetQnodeListFromMnode(SCatalog *pCtg, void *pRpc, const SEpSet *pMgmtEps, SArray *out) { - char *msg = NULL; - int32_t msgLen = 0; - - ctgDebug("try to get qnode list 
from mnode, mgmtEpInUse:%d", pMgmtEps->inUse); - - int32_t code = queryBuildMsg[TMSG_INDEX(TDMT_MND_QNODE_LIST)](NULL, &msg, 0, &msgLen); - if (code) { - ctgError("Build qnode list msg failed, error:%s", tstrerror(code)); - CTG_ERR_RET(code); - } - - SRpcMsg rpcMsg = { - .msgType = TDMT_MND_QNODE_LIST, - .pCont = msg, - .contLen = msgLen, - }; - - SRpcMsg rpcRsp = {0}; - - rpcSendRecv(pRpc, (SEpSet *)pMgmtEps, &rpcMsg, &rpcRsp); - if (TSDB_CODE_SUCCESS != rpcRsp.code) { - ctgError("error rsp for qnode list, error:%s", tstrerror(rpcRsp.code)); - CTG_ERR_RET(rpcRsp.code); - } - - code = queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_QNODE_LIST)](out, rpcRsp.pCont, rpcRsp.contLen); - if (code) { - ctgError("Process qnode list rsp failed, error:%s", tstrerror(rpcRsp.code)); - CTG_ERR_RET(code); - } - - ctgDebug("Got qnode list from mnode, listNum:%d", (int32_t)taosArrayGetSize(out)); - - return TSDB_CODE_SUCCESS; -} - -int32_t ctgGetDBVgInfoFromMnode(SCatalog *pCtg, void *pRpc, const SEpSet *pMgmtEps, SBuildUseDBInput *input, - SUseDbOutput *out) { - char *msg = NULL; - int32_t msgLen = 0; - - ctgDebug("try to get db vgInfo from mnode, dbFName:%s", input->db); - - int32_t code = queryBuildMsg[TMSG_INDEX(TDMT_MND_USE_DB)](input, &msg, 0, &msgLen); - if (code) { - ctgError("Build use db msg failed, code:%x, db:%s", code, input->db); - CTG_ERR_RET(code); - } - - SRpcMsg rpcMsg = { - .msgType = TDMT_MND_USE_DB, - .pCont = msg, - .contLen = msgLen, - }; - - SRpcMsg rpcRsp = {0}; - - rpcSendRecv(pRpc, (SEpSet *)pMgmtEps, &rpcMsg, &rpcRsp); - if (TSDB_CODE_SUCCESS != rpcRsp.code) { - ctgError("error rsp for use db, error:%s, db:%s", tstrerror(rpcRsp.code), input->db); - CTG_ERR_RET(rpcRsp.code); - } - - code = queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_USE_DB)](out, rpcRsp.pCont, rpcRsp.contLen); - if (code) { - ctgError("Process use db rsp failed, code:%x, db:%s", code, input->db); - CTG_ERR_RET(code); - } - - ctgDebug("Got db vgInfo from mnode, dbFName:%s", input->db); - - return TSDB_CODE_SUCCESS; -} - -int32_t ctgGetDBCfgFromMnode(SCatalog *pCtg, void *pRpc, const SEpSet *pMgmtEps, const char *dbFName, SDbCfgInfo *out) { - char *msg = NULL; - int32_t msgLen = 0; - - ctgDebug("try to get db cfg from mnode, dbFName:%s", dbFName); - - int32_t code = queryBuildMsg[TMSG_INDEX(TDMT_MND_GET_DB_CFG)]((void *)dbFName, &msg, 0, &msgLen); - if (code) { - ctgError("Build get db cfg msg failed, code:%x, db:%s", code, dbFName); - CTG_ERR_RET(code); - } - - SRpcMsg rpcMsg = { - .msgType = TDMT_MND_GET_DB_CFG, - .pCont = msg, - .contLen = msgLen, - }; - - SRpcMsg rpcRsp = {0}; - - rpcSendRecv(pRpc, (SEpSet *)pMgmtEps, &rpcMsg, &rpcRsp); - if (TSDB_CODE_SUCCESS != rpcRsp.code) { - ctgError("error rsp for get db cfg, error:%s, db:%s", tstrerror(rpcRsp.code), dbFName); - CTG_ERR_RET(rpcRsp.code); - } - - code = queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_GET_DB_CFG)](out, rpcRsp.pCont, rpcRsp.contLen); - if (code) { - ctgError("Process get db cfg rsp failed, code:%x, db:%s", code, dbFName); - CTG_ERR_RET(code); - } - - ctgDebug("Got db cfg from mnode, dbFName:%s", dbFName); - - return TSDB_CODE_SUCCESS; -} - -int32_t ctgGetIndexInfoFromMnode(SCatalog *pCtg, void *pRpc, const SEpSet *pMgmtEps, const char *indexName, - SIndexInfo *out) { - char *msg = NULL; - int32_t msgLen = 0; - - ctgDebug("try to get index from mnode, indexName:%s", indexName); - - int32_t code = queryBuildMsg[TMSG_INDEX(TDMT_MND_GET_INDEX)]((void *)indexName, &msg, 0, &msgLen); - if (code) { - ctgError("Build get index msg failed, code:%x, db:%s", code, 
indexName); - CTG_ERR_RET(code); - } - - SRpcMsg rpcMsg = { - .msgType = TDMT_MND_GET_INDEX, - .pCont = msg, - .contLen = msgLen, - }; - - SRpcMsg rpcRsp = {0}; - - rpcSendRecv(pRpc, (SEpSet *)pMgmtEps, &rpcMsg, &rpcRsp); - if (TSDB_CODE_SUCCESS != rpcRsp.code) { - ctgError("error rsp for get index, error:%s, indexName:%s", tstrerror(rpcRsp.code), indexName); - CTG_ERR_RET(rpcRsp.code); - } - - code = queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_GET_INDEX)](out, rpcRsp.pCont, rpcRsp.contLen); - if (code) { - ctgError("Process get index rsp failed, code:%x, indexName:%s", code, indexName); - CTG_ERR_RET(code); - } - - ctgDebug("Got index from mnode, indexName:%s", indexName); - - return TSDB_CODE_SUCCESS; -} - -int32_t ctgGetUdfInfoFromMnode(SCatalog *pCtg, void *pRpc, const SEpSet *pMgmtEps, const char *funcName, - SFuncInfo **out) { - char *msg = NULL; - int32_t msgLen = 0; - - ctgDebug("try to get udf info from mnode, funcName:%s", funcName); - - int32_t code = queryBuildMsg[TMSG_INDEX(TDMT_MND_RETRIEVE_FUNC)]((void *)funcName, &msg, 0, &msgLen); - if (code) { - ctgError("Build get udf msg failed, code:%x, db:%s", code, funcName); - CTG_ERR_RET(code); - } - - SRpcMsg rpcMsg = { - .msgType = TDMT_MND_RETRIEVE_FUNC, - .pCont = msg, - .contLen = msgLen, - }; - - SRpcMsg rpcRsp = {0}; - - rpcSendRecv(pRpc, (SEpSet *)pMgmtEps, &rpcMsg, &rpcRsp); - if (TSDB_CODE_SUCCESS != rpcRsp.code) { - if (TSDB_CODE_MND_FUNC_NOT_EXIST == rpcRsp.code) { - ctgDebug("funcName %s not exist in mnode", funcName); - taosMemoryFreeClear(*out); - CTG_RET(TSDB_CODE_SUCCESS); - } - - ctgError("error rsp for get udf, error:%s, funcName:%s", tstrerror(rpcRsp.code), funcName); - CTG_ERR_RET(rpcRsp.code); - } - - code = queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_RETRIEVE_FUNC)](*out, rpcRsp.pCont, rpcRsp.contLen); - if (code) { - ctgError("Process get udf rsp failed, code:%x, funcName:%s", code, funcName); - CTG_ERR_RET(code); - } - - ctgDebug("Got udf from mnode, funcName:%s", funcName); - - return TSDB_CODE_SUCCESS; -} - -int32_t ctgGetUserDbAuthFromMnode(SCatalog *pCtg, void *pRpc, const SEpSet *pMgmtEps, const char *user, - SGetUserAuthRsp *authRsp) { - char *msg = NULL; - int32_t msgLen = 0; - - ctgDebug("try to get user auth from mnode, user:%s", user); - - int32_t code = queryBuildMsg[TMSG_INDEX(TDMT_MND_GET_USER_AUTH)]((void *)user, &msg, 0, &msgLen); - if (code) { - ctgError("Build get user auth msg failed, code:%x, db:%s", code, user); - CTG_ERR_RET(code); - } - - SRpcMsg rpcMsg = { - .msgType = TDMT_MND_GET_USER_AUTH, - .pCont = msg, - .contLen = msgLen, - }; - - SRpcMsg rpcRsp = {0}; - - rpcSendRecv(pRpc, (SEpSet *)pMgmtEps, &rpcMsg, &rpcRsp); - if (TSDB_CODE_SUCCESS != rpcRsp.code) { - ctgError("error rsp for get user auth, error:%s, user:%s", tstrerror(rpcRsp.code), user); - CTG_ERR_RET(rpcRsp.code); - } - - code = queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_GET_USER_AUTH)](authRsp, rpcRsp.pCont, rpcRsp.contLen); - if (code) { - ctgError("Process get user auth rsp failed, code:%x, user:%s", code, user); - CTG_ERR_RET(code); - } - - ctgDebug("Got user auth from mnode, user:%s", user); - - return TSDB_CODE_SUCCESS; -} - -int32_t ctgIsTableMetaExistInCache(SCatalog *pCtg, char *dbFName, char *tbName, int32_t *exist) { - if (NULL == pCtg->dbCache) { - *exist = 0; - ctgWarn("empty db cache, dbFName:%s, tbName:%s", dbFName, tbName); - return TSDB_CODE_SUCCESS; - } - - SCtgDBCache *dbCache = NULL; - ctgAcquireDBCache(pCtg, dbFName, &dbCache); - if (NULL == dbCache) { - *exist = 0; - return TSDB_CODE_SUCCESS; - } - - size_t sz = 
0; - CTG_LOCK(CTG_READ, &dbCache->tbCache.metaLock); - STableMeta *tbMeta = taosHashGet(dbCache->tbCache.metaCache, tbName, strlen(tbName)); - CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock); - - if (NULL == tbMeta) { - ctgReleaseDBCache(pCtg, dbCache); - - *exist = 0; - ctgDebug("tbmeta not in cache, dbFName:%s, tbName:%s", dbFName, tbName); - return TSDB_CODE_SUCCESS; - } - - *exist = 1; - - ctgReleaseDBCache(pCtg, dbCache); - - ctgDebug("tbmeta is in cache, dbFName:%s, tbName:%s", dbFName, tbName); - - return TSDB_CODE_SUCCESS; -} - -int32_t ctgGetTableMetaFromCache(SCatalog *pCtg, const SName *pTableName, STableMeta **pTableMeta, bool *inCache, - int32_t flag, uint64_t *dbId) { - if (NULL == pCtg->dbCache) { - ctgDebug("empty tbmeta cache, tbName:%s", pTableName->tname); - goto _return; - } - - char dbFName[TSDB_DB_FNAME_LEN] = {0}; - if (CTG_FLAG_IS_SYS_DB(flag)) { - strcpy(dbFName, pTableName->dbname); - } else { - tNameGetFullDbName(pTableName, dbFName); - } - - *pTableMeta = NULL; - - SCtgDBCache *dbCache = NULL; - ctgAcquireDBCache(pCtg, dbFName, &dbCache); - if (NULL == dbCache) { - ctgDebug("db %s not in cache", pTableName->tname); - goto _return; - } - - int32_t sz = 0; - CTG_LOCK(CTG_READ, &dbCache->tbCache.metaLock); - int32_t code = taosHashGetDup_m(dbCache->tbCache.metaCache, pTableName->tname, strlen(pTableName->tname), - (void **)pTableMeta, &sz); - CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock); - - if (NULL == *pTableMeta) { - ctgReleaseDBCache(pCtg, dbCache); - ctgDebug("tbl not in cache, dbFName:%s, tbName:%s", dbFName, pTableName->tname); - goto _return; - } - - if (dbId) { - *dbId = dbCache->dbId; - } - - STableMeta *tbMeta = *pTableMeta; - - if (tbMeta->tableType != TSDB_CHILD_TABLE) { - ctgReleaseDBCache(pCtg, dbCache); - ctgDebug("Got meta from cache, type:%d, dbFName:%s, tbName:%s", tbMeta->tableType, dbFName, pTableName->tname); - - *inCache = true; - CTG_CACHE_STAT_ADD(tblHitNum, 1); - - return TSDB_CODE_SUCCESS; - } - - ctgDebug("Got subtable meta from cache, type:%d, dbFName:%s, tbName:%s, suid:%" PRIx64, tbMeta->tableType, dbFName, - pTableName->tname, tbMeta->suid); - - CTG_LOCK(CTG_READ, &dbCache->tbCache.stbLock); - - STableMeta **stbMeta = taosHashGet(dbCache->tbCache.stbCache, &tbMeta->suid, sizeof(tbMeta->suid)); - if (NULL == stbMeta || NULL == *stbMeta) { - CTG_UNLOCK(CTG_READ, &dbCache->tbCache.stbLock); - ctgReleaseDBCache(pCtg, dbCache); - ctgError("stb not in stbCache, suid:%" PRIx64, tbMeta->suid); - taosMemoryFreeClear(*pTableMeta); - goto _return; - } - - if ((*stbMeta)->suid != tbMeta->suid) { - CTG_UNLOCK(CTG_READ, &dbCache->tbCache.stbLock); - ctgReleaseDBCache(pCtg, dbCache); - taosMemoryFreeClear(*pTableMeta); - ctgError("stable suid in stbCache mis-match, expected suid:%" PRIx64 ",actual suid:%" PRIx64, tbMeta->suid, - (*stbMeta)->suid); - CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR); - } - - int32_t metaSize = CTG_META_SIZE(*stbMeta); - *pTableMeta = taosMemoryRealloc(*pTableMeta, metaSize); - if (NULL == *pTableMeta) { - CTG_UNLOCK(CTG_READ, &dbCache->tbCache.stbLock); - ctgReleaseDBCache(pCtg, dbCache); - ctgError("realloc size[%d] failed", metaSize); - CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); - } - - memcpy(&(*pTableMeta)->sversion, &(*stbMeta)->sversion, metaSize - sizeof(SCTableMeta)); - - CTG_UNLOCK(CTG_READ, &dbCache->tbCache.stbLock); - - ctgReleaseDBCache(pCtg, dbCache); - - *inCache = true; - CTG_CACHE_STAT_ADD(tblHitNum, 1); - - ctgDebug("Got tbmeta from cache, dbFName:%s, tbName:%s", dbFName, pTableName->tname); - - return 
TSDB_CODE_SUCCESS; - -_return: - - *inCache = false; - CTG_CACHE_STAT_ADD(tblMissNum, 1); - - return TSDB_CODE_SUCCESS; -} - -int32_t ctgGetTableTypeFromCache(SCatalog *pCtg, const char *dbFName, const char *tableName, int32_t *tbType) { - if (NULL == pCtg->dbCache) { - ctgWarn("empty db cache, dbFName:%s, tbName:%s", dbFName, tableName); - return TSDB_CODE_SUCCESS; - } - - SCtgDBCache *dbCache = NULL; - ctgAcquireDBCache(pCtg, dbFName, &dbCache); - if (NULL == dbCache) { - return TSDB_CODE_SUCCESS; - } - - CTG_LOCK(CTG_READ, &dbCache->tbCache.metaLock); - STableMeta *pTableMeta = (STableMeta *)taosHashAcquire(dbCache->tbCache.metaCache, tableName, strlen(tableName)); - - if (NULL == pTableMeta) { - CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock); - ctgWarn("tbl not in cache, dbFName:%s, tbName:%s", dbFName, tableName); - ctgReleaseDBCache(pCtg, dbCache); - - return TSDB_CODE_SUCCESS; - } - - *tbType = atomic_load_8(&pTableMeta->tableType); - - taosHashRelease(dbCache->tbCache.metaCache, pTableMeta); - - CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock); - - ctgReleaseDBCache(pCtg, dbCache); - - ctgDebug("Got tbtype from cache, dbFName:%s, tbName:%s, type:%d", dbFName, tableName, *tbType); - - return TSDB_CODE_SUCCESS; -} - -int32_t ctgChkAuthFromCache(SCatalog *pCtg, const char *user, const char *dbFName, AUTH_TYPE type, bool *inCache, - bool *pass) { - if (NULL == pCtg->userCache) { - ctgDebug("empty user auth cache, user:%s", user); - goto _return; - } - - SCtgUserAuth *pUser = (SCtgUserAuth *)taosHashGet(pCtg->userCache, user, strlen(user)); - if (NULL == pUser) { - ctgDebug("user not in cache, user:%s", user); - goto _return; - } - - *inCache = true; - - ctgDebug("Got user from cache, user:%s", user); - CTG_CACHE_STAT_ADD(userHitNum, 1); - - if (pUser->superUser) { - *pass = true; - return TSDB_CODE_SUCCESS; - } - - CTG_LOCK(CTG_READ, &pUser->lock); - if (pUser->createdDbs && taosHashGet(pUser->createdDbs, dbFName, strlen(dbFName))) { - *pass = true; - CTG_UNLOCK(CTG_READ, &pUser->lock); - return TSDB_CODE_SUCCESS; - } - - if (pUser->readDbs && taosHashGet(pUser->readDbs, dbFName, strlen(dbFName)) && type == AUTH_TYPE_READ) { - *pass = true; - } - - if (pUser->writeDbs && taosHashGet(pUser->writeDbs, dbFName, strlen(dbFName)) && type == AUTH_TYPE_WRITE) { - *pass = true; - } - - CTG_UNLOCK(CTG_READ, &pUser->lock); - - return TSDB_CODE_SUCCESS; - -_return: - - *inCache = false; - CTG_CACHE_STAT_ADD(userMissNum, 1); - - return TSDB_CODE_SUCCESS; -} - -int32_t ctgGetTableMetaFromMnodeImpl(SCatalog *pCtg, void *pTrans, const SEpSet *pMgmtEps, char *dbFName, char *tbName, - STableMetaOutput *output) { - SBuildTableMetaInput bInput = {.vgId = 0, .dbFName = dbFName, .tbName = tbName}; - char *msg = NULL; - SEpSet *pVnodeEpSet = NULL; - int32_t msgLen = 0; - - ctgDebug("try to get table meta from mnode, dbFName:%s, tbName:%s", dbFName, tbName); - - int32_t code = queryBuildMsg[TMSG_INDEX(TDMT_MND_TABLE_META)](&bInput, &msg, 0, &msgLen); - if (code) { - ctgError("Build mnode stablemeta msg failed, code:%x", code); - CTG_ERR_RET(code); - } - - SRpcMsg rpcMsg = { - .msgType = TDMT_MND_TABLE_META, - .pCont = msg, - .contLen = msgLen, - }; - - SRpcMsg rpcRsp = {0}; - - rpcSendRecv(pTrans, (SEpSet *)pMgmtEps, &rpcMsg, &rpcRsp); - - if (TSDB_CODE_SUCCESS != rpcRsp.code) { - if (CTG_TABLE_NOT_EXIST(rpcRsp.code)) { - SET_META_TYPE_NULL(output->metaType); - ctgDebug("stablemeta not exist in mnode, dbFName:%s, tbName:%s", dbFName, tbName); - return TSDB_CODE_SUCCESS; - } - - ctgError("error rsp for 
stablemeta from mnode, code:%s, dbFName:%s, tbName:%s", tstrerror(rpcRsp.code), dbFName, - tbName); - CTG_ERR_RET(rpcRsp.code); - } - - code = queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_TABLE_META)](output, rpcRsp.pCont, rpcRsp.contLen); - if (code) { - ctgError("Process mnode stablemeta rsp failed, code:%x, dbFName:%s, tbName:%s", code, dbFName, tbName); - CTG_ERR_RET(code); - } - - ctgDebug("Got table meta from mnode, dbFName:%s, tbName:%s", dbFName, tbName); - - return TSDB_CODE_SUCCESS; -} - -int32_t ctgGetTableMetaFromMnode(SCatalog *pCtg, void *pTrans, const SEpSet *pMgmtEps, const SName *pTableName, - STableMetaOutput *output) { - char dbFName[TSDB_DB_FNAME_LEN]; - tNameGetFullDbName(pTableName, dbFName); - - return ctgGetTableMetaFromMnodeImpl(pCtg, pTrans, pMgmtEps, dbFName, (char *)pTableName->tname, output); -} - -int32_t ctgGetTableMetaFromVnodeImpl(SCatalog *pCtg, void *pTrans, const SEpSet *pMgmtEps, const SName *pTableName, - SVgroupInfo *vgroupInfo, STableMetaOutput *output) { - if (NULL == pCtg || NULL == pTrans || NULL == pMgmtEps || NULL == pTableName || NULL == vgroupInfo || - NULL == output) { - CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT); - } - - char dbFName[TSDB_DB_FNAME_LEN]; - tNameGetFullDbName(pTableName, dbFName); - - ctgDebug("try to get table meta from vnode, dbFName:%s, tbName:%s", dbFName, tNameGetTableName(pTableName)); - - SBuildTableMetaInput bInput = { - .vgId = vgroupInfo->vgId, .dbFName = dbFName, .tbName = (char *)tNameGetTableName(pTableName)}; - char *msg = NULL; - int32_t msgLen = 0; - - int32_t code = queryBuildMsg[TMSG_INDEX(TDMT_VND_TABLE_META)](&bInput, &msg, 0, &msgLen); - if (code) { - ctgError("Build vnode tablemeta msg failed, code:%x, dbFName:%s, tbName:%s", code, dbFName, - tNameGetTableName(pTableName)); - CTG_ERR_RET(code); - } - - SRpcMsg rpcMsg = { - .msgType = TDMT_VND_TABLE_META, - .pCont = msg, - .contLen = msgLen, - }; - - SRpcMsg rpcRsp = {0}; - rpcSendRecv(pTrans, &vgroupInfo->epSet, &rpcMsg, &rpcRsp); - - if (TSDB_CODE_SUCCESS != rpcRsp.code) { - if (CTG_TABLE_NOT_EXIST(rpcRsp.code)) { - SET_META_TYPE_NULL(output->metaType); - ctgDebug("tablemeta not exist in vnode, dbFName:%s, tbName:%s", dbFName, tNameGetTableName(pTableName)); - return TSDB_CODE_SUCCESS; - } - - ctgError("error rsp for table meta from vnode, code:%s, dbFName:%s, tbName:%s", tstrerror(rpcRsp.code), dbFName, - tNameGetTableName(pTableName)); - CTG_ERR_RET(rpcRsp.code); - } - - code = queryProcessMsgRsp[TMSG_INDEX(TDMT_VND_TABLE_META)](output, rpcRsp.pCont, rpcRsp.contLen); - if (code) { - ctgError("Process vnode tablemeta rsp failed, code:%s, dbFName:%s, tbName:%s", tstrerror(code), dbFName, - tNameGetTableName(pTableName)); - CTG_ERR_RET(code); - } - - ctgDebug("Got table meta from vnode, dbFName:%s, tbName:%s", dbFName, tNameGetTableName(pTableName)); - return TSDB_CODE_SUCCESS; -} - -int32_t ctgGetTableMetaFromVnode(SCatalog *pCtg, void *pTrans, const SEpSet *pMgmtEps, const SName *pTableName, - SVgroupInfo *vgroupInfo, STableMetaOutput *output) { - int32_t code = 0; - int32_t retryNum = 0; - - while (retryNum < CTG_DEFAULT_MAX_RETRY_TIMES) { - code = ctgGetTableMetaFromVnodeImpl(pCtg, pTrans, pMgmtEps, pTableName, vgroupInfo, output); - if (code) { - if (TSDB_CODE_VND_HASH_MISMATCH == code) { - char dbFName[TSDB_DB_FNAME_LEN]; - tNameGetFullDbName(pTableName, dbFName); - - code = catalogRefreshDBVgInfo(pCtg, pTrans, pMgmtEps, dbFName); - if (code != TSDB_CODE_SUCCESS) { - break; - } - - ++retryNum; - continue; - } - } - - break; - } - - CTG_RET(code); -} - 
-int32_t ctgGetHashFunction(int8_t hashMethod, tableNameHashFp *fp) { - switch (hashMethod) { - default: - *fp = MurmurHash3_32; - break; - } - - return TSDB_CODE_SUCCESS; -} - -int32_t ctgGenerateVgList(SCatalog *pCtg, SHashObj *vgHash, SArray **pList) { - SHashObj *vgroupHash = NULL; - SVgroupInfo *vgInfo = NULL; - SArray *vgList = NULL; - int32_t code = 0; - int32_t vgNum = taosHashGetSize(vgHash); - - vgList = taosArrayInit(vgNum, sizeof(SVgroupInfo)); - if (NULL == vgList) { - ctgError("taosArrayInit failed, num:%d", vgNum); - CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); - } - - void *pIter = taosHashIterate(vgHash, NULL); - while (pIter) { - vgInfo = pIter; - - if (NULL == taosArrayPush(vgList, vgInfo)) { - ctgError("taosArrayPush failed, vgId:%d", vgInfo->vgId); - taosHashCancelIterate(vgHash, pIter); - CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR); - } - - pIter = taosHashIterate(vgHash, pIter); - vgInfo = NULL; - } - - *pList = vgList; - - ctgDebug("Got vgList from cache, vgNum:%d", vgNum); - - return TSDB_CODE_SUCCESS; - -_return: - - if (vgList) { - taosArrayDestroy(vgList); - } - - CTG_RET(code); -} - -int32_t ctgGetVgInfoFromHashValue(SCatalog *pCtg, SDBVgInfo *dbInfo, const SName *pTableName, SVgroupInfo *pVgroup) { - int32_t code = 0; - - int32_t vgNum = taosHashGetSize(dbInfo->vgHash); - char db[TSDB_DB_FNAME_LEN] = {0}; - tNameGetFullDbName(pTableName, db); - - if (vgNum <= 0) { - ctgError("db vgroup cache invalid, db:%s, vgroup number:%d", db, vgNum); - CTG_ERR_RET(TSDB_CODE_TSC_DB_NOT_SELECTED); - } - - tableNameHashFp fp = NULL; - SVgroupInfo *vgInfo = NULL; - - CTG_ERR_RET(ctgGetHashFunction(dbInfo->hashMethod, &fp)); - - char tbFullName[TSDB_TABLE_FNAME_LEN]; - tNameExtractFullName(pTableName, tbFullName); - - uint32_t hashValue = (*fp)(tbFullName, (uint32_t)strlen(tbFullName)); - - void *pIter = taosHashIterate(dbInfo->vgHash, NULL); - while (pIter) { - vgInfo = pIter; - if (hashValue >= vgInfo->hashBegin && hashValue <= vgInfo->hashEnd) { - taosHashCancelIterate(dbInfo->vgHash, pIter); - break; - } - - pIter = taosHashIterate(dbInfo->vgHash, pIter); - vgInfo = NULL; - } - - if (NULL == vgInfo) { - ctgError("no hash range found for hash value [%u], db:%s, numOfVgId:%d", hashValue, db, - taosHashGetSize(dbInfo->vgHash)); - CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR); - } - - *pVgroup = *vgInfo; - - CTG_RET(code); -} - -int32_t ctgStbVersionSearchCompare(const void *key1, const void *key2) { - if (*(uint64_t *)key1 < ((SSTableMetaVersion *)key2)->suid) { - return -1; - } else if (*(uint64_t *)key1 > ((SSTableMetaVersion *)key2)->suid) { - return 1; - } else { - return 0; - } -} - -int32_t ctgDbVgVersionSearchCompare(const void *key1, const void *key2) { - if (*(int64_t *)key1 < ((SDbVgVersion *)key2)->dbId) { - return -1; - } else if (*(int64_t *)key1 > ((SDbVgVersion *)key2)->dbId) { - return 1; - } else { - return 0; - } -} - -int32_t ctgStbVersionSortCompare(const void *key1, const void *key2) { - if (((SSTableMetaVersion *)key1)->suid < ((SSTableMetaVersion *)key2)->suid) { - return -1; - } else if (((SSTableMetaVersion *)key1)->suid > ((SSTableMetaVersion *)key2)->suid) { - return 1; - } else { - return 0; - } -} - -int32_t ctgDbVgVersionSortCompare(const void *key1, const void *key2) { - if (((SDbVgVersion *)key1)->dbId < ((SDbVgVersion *)key2)->dbId) { - return -1; - } else if (((SDbVgVersion *)key1)->dbId > ((SDbVgVersion *)key2)->dbId) { - return 1; - } else { - return 0; - } -} - -int32_t ctgMetaRentInit(SCtgRentMgmt *mgmt, uint32_t rentSec, int8_t type) { - mgmt->slotRIdx = 
0; - mgmt->slotNum = rentSec / CTG_RENT_SLOT_SECOND; - mgmt->type = type; - - size_t msgSize = sizeof(SCtgRentSlot) * mgmt->slotNum; - - mgmt->slots = taosMemoryCalloc(1, msgSize); - if (NULL == mgmt->slots) { - qError("calloc %d failed", (int32_t)msgSize); - CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); - } - - qDebug("meta rent initialized, type:%d, slotNum:%d", type, mgmt->slotNum); - - return TSDB_CODE_SUCCESS; -} - -int32_t ctgMetaRentAdd(SCtgRentMgmt *mgmt, void *meta, int64_t id, int32_t size) { - int16_t widx = abs((int)(id % mgmt->slotNum)); - - SCtgRentSlot *slot = &mgmt->slots[widx]; - int32_t code = 0; - - CTG_LOCK(CTG_WRITE, &slot->lock); - if (NULL == slot->meta) { - slot->meta = taosArrayInit(CTG_DEFAULT_RENT_SLOT_SIZE, size); - if (NULL == slot->meta) { - qError("taosArrayInit %d failed, id:%" PRIx64 ", slot idx:%d, type:%d", CTG_DEFAULT_RENT_SLOT_SIZE, id, widx, - mgmt->type); - CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR); - } - } - - if (NULL == taosArrayPush(slot->meta, meta)) { - qError("taosArrayPush meta to rent failed, id:%" PRIx64 ", slot idx:%d, type:%d", id, widx, mgmt->type); - CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR); - } - - slot->needSort = true; - - qDebug("add meta to rent, id:%" PRIx64 ", slot idx:%d, type:%d", id, widx, mgmt->type); - -_return: - - CTG_UNLOCK(CTG_WRITE, &slot->lock); - CTG_RET(code); -} - -int32_t ctgMetaRentUpdate(SCtgRentMgmt *mgmt, void *meta, int64_t id, int32_t size, __compar_fn_t sortCompare, - __compar_fn_t searchCompare) { - int16_t widx = abs((int)(id % mgmt->slotNum)); - - SCtgRentSlot *slot = &mgmt->slots[widx]; - int32_t code = 0; - - CTG_LOCK(CTG_WRITE, &slot->lock); - if (NULL == slot->meta) { - qError("empty meta slot, id:%" PRIx64 ", slot idx:%d, type:%d", id, widx, mgmt->type); - CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); - } - - if (slot->needSort) { - qDebug("meta slot before sorte, slot idx:%d, type:%d, size:%d", widx, mgmt->type, - (int32_t)taosArrayGetSize(slot->meta)); - taosArraySort(slot->meta, sortCompare); - slot->needSort = false; - qDebug("meta slot sorted, slot idx:%d, type:%d, size:%d", widx, mgmt->type, (int32_t)taosArrayGetSize(slot->meta)); - } - - void *orig = taosArraySearch(slot->meta, &id, searchCompare, TD_EQ); - if (NULL == orig) { - qError("meta not found in slot, id:%" PRIx64 ", slot idx:%d, type:%d, size:%d", id, widx, mgmt->type, - (int32_t)taosArrayGetSize(slot->meta)); - CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); - } - - memcpy(orig, meta, size); - - qDebug("meta in rent updated, id:%" PRIx64 ", slot idx:%d, type:%d", id, widx, mgmt->type); - -_return: - - CTG_UNLOCK(CTG_WRITE, &slot->lock); - - if (code) { - qWarn("meta in rent update failed, will try to add it, code:%x, id:%" PRIx64 ", slot idx:%d, type:%d", code, id, - widx, mgmt->type); - CTG_RET(ctgMetaRentAdd(mgmt, meta, id, size)); - } - - CTG_RET(code); -} - -int32_t ctgMetaRentRemove(SCtgRentMgmt *mgmt, int64_t id, __compar_fn_t sortCompare, __compar_fn_t searchCompare) { - int16_t widx = abs((int)(id % mgmt->slotNum)); - - SCtgRentSlot *slot = &mgmt->slots[widx]; - int32_t code = 0; - - CTG_LOCK(CTG_WRITE, &slot->lock); - if (NULL == slot->meta) { - qError("empty meta slot, id:%" PRIx64 ", slot idx:%d, type:%d", id, widx, mgmt->type); - CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); - } - - if (slot->needSort) { - taosArraySort(slot->meta, sortCompare); - slot->needSort = false; - qDebug("meta slot sorted, slot idx:%d, type:%d", widx, mgmt->type); - } - - int32_t idx = taosArraySearchIdx(slot->meta, &id, searchCompare, TD_EQ); - if (idx < 0) { - 
qError("meta not found in slot, id:%" PRIx64 ", slot idx:%d, type:%d", id, widx, mgmt->type); - CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); - } - - taosArrayRemove(slot->meta, idx); - - qDebug("meta in rent removed, id:%" PRIx64 ", slot idx:%d, type:%d", id, widx, mgmt->type); - -_return: - - CTG_UNLOCK(CTG_WRITE, &slot->lock); - - CTG_RET(code); -} - -int32_t ctgMetaRentGetImpl(SCtgRentMgmt *mgmt, void **res, uint32_t *num, int32_t size) { - int16_t ridx = atomic_add_fetch_16(&mgmt->slotRIdx, 1); - if (ridx >= mgmt->slotNum) { - ridx %= mgmt->slotNum; - atomic_store_16(&mgmt->slotRIdx, ridx); - } - - SCtgRentSlot *slot = &mgmt->slots[ridx]; - int32_t code = 0; - - CTG_LOCK(CTG_READ, &slot->lock); - if (NULL == slot->meta) { - qDebug("empty meta in slot:%d, type:%d", ridx, mgmt->type); - *num = 0; - goto _return; - } - - size_t metaNum = taosArrayGetSize(slot->meta); - if (metaNum <= 0) { - qDebug("no meta in slot:%d, type:%d", ridx, mgmt->type); - *num = 0; - goto _return; - } - - size_t msize = metaNum * size; - *res = taosMemoryMalloc(msize); - if (NULL == *res) { - qError("malloc %d failed", (int32_t)msize); - CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR); - } - - void *meta = taosArrayGet(slot->meta, 0); - - memcpy(*res, meta, msize); - - *num = (uint32_t)metaNum; - - qDebug("Got %d meta from rent, type:%d", (int32_t)metaNum, mgmt->type); - -_return: - - CTG_UNLOCK(CTG_READ, &slot->lock); - - CTG_RET(code); -} - -int32_t ctgMetaRentGet(SCtgRentMgmt *mgmt, void **res, uint32_t *num, int32_t size) { - while (true) { - int64_t msec = taosGetTimestampMs(); - int64_t lsec = atomic_load_64(&mgmt->lastReadMsec); - if ((msec - lsec) < CTG_RENT_SLOT_SECOND * 1000) { - *res = NULL; - *num = 0; - qDebug("too short time period to get expired meta, type:%d", mgmt->type); - return TSDB_CODE_SUCCESS; - } - - if (lsec != atomic_val_compare_exchange_64(&mgmt->lastReadMsec, lsec, msec)) { - continue; - } - - break; - } - - CTG_ERR_RET(ctgMetaRentGetImpl(mgmt, res, num, size)); - - return TSDB_CODE_SUCCESS; -} - -int32_t ctgAddNewDBCache(SCatalog *pCtg, const char *dbFName, uint64_t dbId) { - int32_t code = 0; - - SCtgDBCache newDBCache = {0}; - newDBCache.dbId = dbId; - - newDBCache.tbCache.metaCache = taosHashInit(gCtgMgmt.cfg.maxTblCacheNum, - taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK); - if (NULL == newDBCache.tbCache.metaCache) { - ctgError("taosHashInit %d metaCache failed", gCtgMgmt.cfg.maxTblCacheNum); - CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); - } - - newDBCache.tbCache.stbCache = taosHashInit(gCtgMgmt.cfg.maxTblCacheNum, - taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), true, HASH_ENTRY_LOCK); - if (NULL == newDBCache.tbCache.stbCache) { - ctgError("taosHashInit %d stbCache failed", gCtgMgmt.cfg.maxTblCacheNum); - CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR); - } - - code = taosHashPut(pCtg->dbCache, dbFName, strlen(dbFName), &newDBCache, sizeof(SCtgDBCache)); - if (code) { - if (HASH_NODE_EXIST(code)) { - ctgDebug("db already in cache, dbFName:%s", dbFName); - goto _return; - } - - ctgError("taosHashPut db to cache failed, dbFName:%s", dbFName); - CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR); - } - - CTG_CACHE_STAT_ADD(dbNum, 1); - - SDbVgVersion vgVersion = {.dbId = newDBCache.dbId, .vgVersion = -1}; - strncpy(vgVersion.dbFName, dbFName, sizeof(vgVersion.dbFName)); - - ctgDebug("db added to cache, dbFName:%s, dbId:%" PRIx64, dbFName, dbId); - - CTG_ERR_RET(ctgMetaRentAdd(&pCtg->dbRent, &vgVersion, dbId, sizeof(SDbVgVersion))); - - ctgDebug("db added to rent, dbFName:%s, 
vgVersion:%d, dbId:%" PRIx64, dbFName, vgVersion.vgVersion, dbId); - - return TSDB_CODE_SUCCESS; - -_return: - - ctgFreeDbCache(&newDBCache); - - CTG_RET(code); -} - -void ctgRemoveStbRent(SCatalog *pCtg, SCtgTbMetaCache *cache) { - CTG_LOCK(CTG_WRITE, &cache->stbLock); - if (cache->stbCache) { - void *pIter = taosHashIterate(cache->stbCache, NULL); - while (pIter) { - uint64_t *suid = NULL; - suid = taosHashGetKey(pIter, NULL); - - if (TSDB_CODE_SUCCESS == - ctgMetaRentRemove(&pCtg->stbRent, *suid, ctgStbVersionSortCompare, ctgStbVersionSearchCompare)) { - ctgDebug("stb removed from rent, suid:%" PRIx64, *suid); - } - - pIter = taosHashIterate(cache->stbCache, pIter); - } - } - CTG_UNLOCK(CTG_WRITE, &cache->stbLock); -} - -int32_t ctgRemoveDB(SCatalog *pCtg, SCtgDBCache *dbCache, const char *dbFName) { - uint64_t dbId = dbCache->dbId; - - ctgInfo("start to remove db from cache, dbFName:%s, dbId:%" PRIx64, dbFName, dbCache->dbId); - - atomic_store_8(&dbCache->deleted, 1); - - ctgRemoveStbRent(pCtg, &dbCache->tbCache); - - ctgFreeDbCache(dbCache); - - CTG_ERR_RET(ctgMetaRentRemove(&pCtg->dbRent, dbCache->dbId, ctgDbVgVersionSortCompare, ctgDbVgVersionSearchCompare)); - - ctgDebug("db removed from rent, dbFName:%s, dbId:%" PRIx64, dbFName, dbCache->dbId); - - if (taosHashRemove(pCtg->dbCache, dbFName, strlen(dbFName))) { - ctgInfo("taosHashRemove from dbCache failed, may be removed, dbFName:%s", dbFName); - CTG_ERR_RET(TSDB_CODE_CTG_DB_DROPPED); - } - - CTG_CACHE_STAT_SUB(dbNum, 1); - - ctgInfo("db removed from cache, dbFName:%s, dbId:%" PRIx64, dbFName, dbId); - - return TSDB_CODE_SUCCESS; -} - -int32_t ctgGetAddDBCache(SCatalog *pCtg, const char *dbFName, uint64_t dbId, SCtgDBCache **pCache) { - int32_t code = 0; - SCtgDBCache *dbCache = NULL; - ctgGetDBCache(pCtg, dbFName, &dbCache); - - if (dbCache) { - // TODO OPEN IT -#if 0 - if (dbCache->dbId == dbId) { - *pCache = dbCache; - return TSDB_CODE_SUCCESS; - } -#else - if (0 == dbId) { - *pCache = dbCache; - return TSDB_CODE_SUCCESS; - } - - if (dbId && (dbCache->dbId == 0)) { - dbCache->dbId = dbId; - *pCache = dbCache; - return TSDB_CODE_SUCCESS; - } - - if (dbCache->dbId == dbId) { - *pCache = dbCache; - return TSDB_CODE_SUCCESS; - } -#endif - CTG_ERR_RET(ctgRemoveDB(pCtg, dbCache, dbFName)); - } - - CTG_ERR_RET(ctgAddNewDBCache(pCtg, dbFName, dbId)); - - ctgGetDBCache(pCtg, dbFName, &dbCache); - - *pCache = dbCache; - - return TSDB_CODE_SUCCESS; -} - -int32_t ctgUpdateDBVgInfo(SCatalog *pCtg, const char *dbFName, uint64_t dbId, SDBVgInfo **pDbInfo) { - int32_t code = 0; - SDBVgInfo *dbInfo = *pDbInfo; - - if (NULL == dbInfo->vgHash) { - return TSDB_CODE_SUCCESS; - } - - if (dbInfo->vgVersion < 0 || taosHashGetSize(dbInfo->vgHash) <= 0) { - ctgError("invalid db vgInfo, dbFName:%s, vgHash:%p, vgVersion:%d, vgHashSize:%d", dbFName, dbInfo->vgHash, - dbInfo->vgVersion, taosHashGetSize(dbInfo->vgHash)); - CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); - } - - bool newAdded = false; - SDbVgVersion vgVersion = {.dbId = dbId, .vgVersion = dbInfo->vgVersion, .numOfTable = dbInfo->numOfTable}; - - SCtgDBCache *dbCache = NULL; - CTG_ERR_RET(ctgGetAddDBCache(pCtg, dbFName, dbId, &dbCache)); - if (NULL == dbCache) { - ctgInfo("conflict db update, ignore this update, dbFName:%s, dbId:%" PRIx64, dbFName, dbId); - CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR); - } - - SDBVgInfo *vgInfo = NULL; - CTG_ERR_RET(ctgWAcquireVgInfo(pCtg, dbCache)); - - if (dbCache->vgInfo) { - if (dbInfo->vgVersion < dbCache->vgInfo->vgVersion) { - ctgDebug("db vgVersion is old, 
dbFName:%s, vgVersion:%d, currentVersion:%d", dbFName, dbInfo->vgVersion, - dbCache->vgInfo->vgVersion); - ctgWReleaseVgInfo(dbCache); - - return TSDB_CODE_SUCCESS; - } - - if (dbInfo->vgVersion == dbCache->vgInfo->vgVersion && dbInfo->numOfTable == dbCache->vgInfo->numOfTable) { - ctgDebug("no new db vgVersion or numOfTable, dbFName:%s, vgVersion:%d, numOfTable:%d", dbFName, dbInfo->vgVersion, - dbInfo->numOfTable); - ctgWReleaseVgInfo(dbCache); - - return TSDB_CODE_SUCCESS; - } - - ctgFreeVgInfo(dbCache->vgInfo); - } - - dbCache->vgInfo = dbInfo; - - *pDbInfo = NULL; - - ctgDebug("db vgInfo updated, dbFName:%s, vgVersion:%d, dbId:%" PRIx64, dbFName, vgVersion.vgVersion, vgVersion.dbId); - - ctgWReleaseVgInfo(dbCache); - - dbCache = NULL; - - strncpy(vgVersion.dbFName, dbFName, sizeof(vgVersion.dbFName)); - CTG_ERR_RET(ctgMetaRentUpdate(&pCtg->dbRent, &vgVersion, vgVersion.dbId, sizeof(SDbVgVersion), - ctgDbVgVersionSortCompare, ctgDbVgVersionSearchCompare)); - - CTG_RET(code); -} - -int32_t ctgUpdateTblMeta(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFName, uint64_t dbId, char *tbName, - STableMeta *meta, int32_t metaSize) { - SCtgTbMetaCache *tbCache = &dbCache->tbCache; - - CTG_LOCK(CTG_READ, &tbCache->metaLock); - if (dbCache->deleted || NULL == tbCache->metaCache || NULL == tbCache->stbCache) { - CTG_UNLOCK(CTG_READ, &tbCache->metaLock); - ctgError("db is dropping, dbId:%" PRIx64, dbCache->dbId); - CTG_ERR_RET(TSDB_CODE_CTG_DB_DROPPED); - } - - int8_t origType = 0; - uint64_t origSuid = 0; - bool isStb = meta->tableType == TSDB_SUPER_TABLE; - STableMeta *orig = taosHashGet(tbCache->metaCache, tbName, strlen(tbName)); - if (orig) { - origType = orig->tableType; - - if (origType == meta->tableType && orig->uid == meta->uid && orig->sversion >= meta->sversion && - orig->tversion >= meta->tversion) { - CTG_UNLOCK(CTG_READ, &tbCache->metaLock); - return TSDB_CODE_SUCCESS; - } - - if (origType == TSDB_SUPER_TABLE) { - if ((!isStb) || orig->suid != meta->suid) { - CTG_LOCK(CTG_WRITE, &tbCache->stbLock); - if (taosHashRemove(tbCache->stbCache, &orig->suid, sizeof(orig->suid))) { - ctgError("stb not exist in stbCache, dbFName:%s, stb:%s, suid:%" PRIx64, dbFName, tbName, orig->suid); - } else { - CTG_CACHE_STAT_SUB(stblNum, 1); - } - CTG_UNLOCK(CTG_WRITE, &tbCache->stbLock); - - ctgDebug("stb removed from stbCache, dbFName:%s, stb:%s, suid:%" PRIx64, dbFName, tbName, orig->suid); - - ctgMetaRentRemove(&pCtg->stbRent, orig->suid, ctgStbVersionSortCompare, ctgStbVersionSearchCompare); - } - - origSuid = orig->suid; - } - } - - if (isStb) { - CTG_LOCK(CTG_WRITE, &tbCache->stbLock); - } - - if (taosHashPut(tbCache->metaCache, tbName, strlen(tbName), meta, metaSize) != 0) { - if (isStb) { - CTG_UNLOCK(CTG_WRITE, &tbCache->stbLock); - } - - CTG_UNLOCK(CTG_READ, &tbCache->metaLock); - ctgError("taosHashPut tbmeta to cache failed, dbFName:%s, tbName:%s, tbType:%d", dbFName, tbName, meta->tableType); - CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); - } - - if (NULL == orig) { - CTG_CACHE_STAT_ADD(tblNum, 1); - } - - ctgDebug("tbmeta updated to cache, dbFName:%s, tbName:%s, tbType:%d, suid:%" PRIx64, dbFName, tbName, meta->tableType, - meta->suid); - ctgdShowTableMeta(pCtg, tbName, meta); - - if (!isStb) { - CTG_UNLOCK(CTG_READ, &tbCache->metaLock); - return TSDB_CODE_SUCCESS; - } - - STableMeta *tbMeta = taosHashGet(tbCache->metaCache, tbName, strlen(tbName)); - if (taosHashPut(tbCache->stbCache, &meta->suid, sizeof(meta->suid), &tbMeta, POINTER_BYTES) != 0) { - CTG_UNLOCK(CTG_WRITE, &tbCache->stbLock); - 
CTG_UNLOCK(CTG_READ, &tbCache->metaLock); - ctgError("taosHashPut stable to stable cache failed, suid:%" PRIx64, meta->suid); - CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); - } - - CTG_CACHE_STAT_ADD(stblNum, 1); - - CTG_UNLOCK(CTG_WRITE, &tbCache->stbLock); - - CTG_UNLOCK(CTG_READ, &tbCache->metaLock); +#include "catalogInt.h" +#include "systable.h" +#include "tref.h" - ctgDebug("stb updated to stbCache, dbFName:%s, tbName:%s, tbType:%d, suid:%" PRIx64 ",ma:%p", dbFName, tbName, - meta->tableType, meta->suid, tbMeta); +SCatalogMgmt gCtgMgmt = {0}; - SSTableMetaVersion metaRent = { - .dbId = dbId, .suid = meta->suid, .sversion = meta->sversion, .tversion = meta->tversion}; - strcpy(metaRent.dbFName, dbFName); - strcpy(metaRent.stbName, tbName); - CTG_ERR_RET(ctgMetaRentAdd(&pCtg->stbRent, &metaRent, metaRent.suid, sizeof(SSTableMetaVersion))); - return TSDB_CODE_SUCCESS; -} +int32_t ctgRemoveTbMetaFromCache(SCatalog* pCtg, SName* pTableName, bool syncReq) { + int32_t code = 0; + STableMeta *tblMeta = NULL; + SCtgTbMetaCtx tbCtx = {0}; + tbCtx.flag = CTG_FLAG_UNKNOWN_STB; + tbCtx.pName = pTableName; + + CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &tbCtx, &tblMeta)); -int32_t ctgCloneVgInfo(SDBVgInfo *src, SDBVgInfo **dst) { - *dst = taosMemoryMalloc(sizeof(SDBVgInfo)); - if (NULL == *dst) { - qError("malloc %d failed", (int32_t)sizeof(SDBVgInfo)); - CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + if (NULL == tblMeta) { + ctgDebug("table already not in cache, db:%s, tblName:%s", pTableName->dbname, pTableName->tname); + return TSDB_CODE_SUCCESS; } - memcpy(*dst, src, sizeof(SDBVgInfo)); - - size_t hashSize = taosHashGetSize(src->vgHash); - (*dst)->vgHash = taosHashInit(hashSize, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK); - if (NULL == (*dst)->vgHash) { - qError("taosHashInit %d failed", (int32_t)hashSize); - taosMemoryFreeClear(*dst); - CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + char dbFName[TSDB_DB_FNAME_LEN]; + tNameGetFullDbName(pTableName, dbFName); + + if (TSDB_SUPER_TABLE == tblMeta->tableType) { + CTG_ERR_JRET(ctgDropStbMetaEnqueue(pCtg, dbFName, tbCtx.tbInfo.dbId, pTableName->tname, tblMeta->suid, syncReq)); + } else { + CTG_ERR_JRET(ctgDropTbMetaEnqueue(pCtg, dbFName, tbCtx.tbInfo.dbId, pTableName->tname, syncReq)); } + +_return: - int32_t *vgId = NULL; - void *pIter = taosHashIterate(src->vgHash, NULL); - while (pIter) { - vgId = taosHashGetKey(pIter, NULL); - - if (taosHashPut((*dst)->vgHash, (void *)vgId, sizeof(int32_t), pIter, sizeof(SVgroupInfo))) { - qError("taosHashPut failed, hashSize:%d", (int32_t)hashSize); - taosHashCancelIterate(src->vgHash, pIter); - taosHashCleanup((*dst)->vgHash); - taosMemoryFreeClear(*dst); - CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); - } - - pIter = taosHashIterate(src->vgHash, pIter); - } + taosMemoryFreeClear(tblMeta); - return TSDB_CODE_SUCCESS; + CTG_RET(code); } -int32_t ctgGetDBVgInfo(SCatalog *pCtg, void *pRpc, const SEpSet *pMgmtEps, const char *dbFName, SCtgDBCache **dbCache, - SDBVgInfo **pInfo) { - bool inCache = false; +int32_t ctgGetDBVgInfo(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const char* dbFName, SCtgDBCache** dbCache, SDBVgInfo **pInfo) { int32_t code = 0; - CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, dbCache, &inCache)); + CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, dbCache)); - if (inCache) { + if (*dbCache) { return TSDB_CODE_SUCCESS; } - SUseDbOutput DbOut = {0}; + SUseDbOutput DbOut = {0}; SBuildUseDBInput input = {0}; tstrncpy(input.db, dbFName, tListLen(input.db)); input.vgVersion = 
CTG_DEFAULT_INVALID_VERSION; - code = ctgGetDBVgInfoFromMnode(pCtg, pRpc, pMgmtEps, &input, &DbOut); - if (code) { - if (CTG_DB_NOT_EXIST(code) && input.vgVersion > CTG_DEFAULT_INVALID_VERSION) { - ctgDebug("db no longer exist, dbFName:%s, dbId:%" PRIx64, input.db, input.dbId); - ctgPushRmDBMsgInQueue(pCtg, input.db, input.dbId); - } - - CTG_ERR_RET(code); - } + CTG_ERR_RET(ctgGetDBVgInfoFromMnode(pCtg, pTrans, pMgmtEps, &input, &DbOut, NULL)); CTG_ERR_JRET(ctgCloneVgInfo(DbOut.dbVgroup, pInfo)); - CTG_ERR_RET(ctgPushUpdateVgMsgInQueue(pCtg, dbFName, DbOut.dbId, DbOut.dbVgroup, false)); + CTG_ERR_RET(ctgUpdateVgroupEnqueue(pCtg, dbFName, DbOut.dbId, DbOut.dbVgroup, false)); return TSDB_CODE_SUCCESS; @@ -1809,81 +80,53 @@ _return: taosMemoryFreeClear(*pInfo); *pInfo = DbOut.dbVgroup; - + CTG_RET(code); } -int32_t ctgRefreshDBVgInfo(SCatalog *pCtg, void *pRpc, const SEpSet *pMgmtEps, const char *dbFName) { - bool inCache = false; - int32_t code = 0; - SCtgDBCache *dbCache = NULL; +int32_t ctgRefreshDBVgInfo(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const char* dbFName) { + int32_t code = 0; + SCtgDBCache* dbCache = NULL; - CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache, &inCache)); + CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache)); - SUseDbOutput DbOut = {0}; + SUseDbOutput DbOut = {0}; SBuildUseDBInput input = {0}; tstrncpy(input.db, dbFName, tListLen(input.db)); - if (inCache) { + if (NULL != dbCache) { input.dbId = dbCache->dbId; ctgReleaseVgInfo(dbCache); ctgReleaseDBCache(pCtg, dbCache); } - + input.vgVersion = CTG_DEFAULT_INVALID_VERSION; input.numOfTable = 0; - code = ctgGetDBVgInfoFromMnode(pCtg, pRpc, pMgmtEps, &input, &DbOut); + code = ctgGetDBVgInfoFromMnode(pCtg, pTrans, pMgmtEps, &input, &DbOut, NULL); if (code) { - if (CTG_DB_NOT_EXIST(code) && inCache) { + if (CTG_DB_NOT_EXIST(code) && (NULL != dbCache)) { ctgDebug("db no longer exist, dbFName:%s, dbId:%" PRIx64, input.db, input.dbId); - ctgPushRmDBMsgInQueue(pCtg, input.db, input.dbId); + ctgDropDbCacheEnqueue(pCtg, input.db, input.dbId); } CTG_ERR_RET(code); } - CTG_ERR_RET(ctgPushUpdateVgMsgInQueue(pCtg, dbFName, DbOut.dbId, DbOut.dbVgroup, true)); + CTG_ERR_RET(ctgUpdateVgroupEnqueue(pCtg, dbFName, DbOut.dbId, DbOut.dbVgroup, true)); return TSDB_CODE_SUCCESS; } -int32_t ctgCloneMetaOutput(STableMetaOutput *output, STableMetaOutput **pOutput) { - *pOutput = taosMemoryMalloc(sizeof(STableMetaOutput)); - if (NULL == *pOutput) { - qError("malloc %d failed", (int32_t)sizeof(STableMetaOutput)); - CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); - } - - memcpy(*pOutput, output, sizeof(STableMetaOutput)); - - if (output->tbMeta) { - int32_t metaSize = CTG_META_SIZE(output->tbMeta); - (*pOutput)->tbMeta = taosMemoryMalloc(metaSize); - if (NULL == (*pOutput)->tbMeta) { - qError("malloc %d failed", (int32_t)sizeof(STableMetaOutput)); - taosMemoryFreeClear(*pOutput); - CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); - } - - memcpy((*pOutput)->tbMeta, output->tbMeta, metaSize); - } - - return TSDB_CODE_SUCCESS; -} -int32_t ctgRefreshTblMeta(SCatalog *pCtg, void *pTrans, const SEpSet *pMgmtEps, const SName *pTableName, int32_t flag, - STableMetaOutput **pOutput, bool syncReq) { - if (NULL == pCtg || NULL == pTrans || NULL == pMgmtEps || NULL == pTableName) { - CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT); - } +int32_t ctgRefreshTbMeta(CTG_PARAMS, SCtgTbMetaCtx* ctx, STableMetaOutput **pOutput, bool syncReq) { SVgroupInfo vgroupInfo = {0}; - int32_t code = 0; + int32_t code = 0; - if (!CTG_FLAG_IS_SYS_DB(flag)) { - 
CTG_ERR_RET(catalogGetTableHashVgroup(pCtg, pTrans, pMgmtEps, pTableName, &vgroupInfo)); + if (!CTG_FLAG_IS_SYS_DB(ctx->flag)) { + CTG_ERR_RET(catalogGetTableHashVgroup(CTG_PARAMS_LIST(), ctx->pName, &vgroupInfo)); } STableMetaOutput moutput = {0}; @@ -1893,75 +136,72 @@ int32_t ctgRefreshTblMeta(SCatalog *pCtg, void *pTrans, const SEpSet *pMgmtEps, CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); } - if (CTG_FLAG_IS_SYS_DB(flag)) { - ctgDebug("will refresh tbmeta, supposed in information_schema, tbName:%s", tNameGetTableName(pTableName)); + if (CTG_FLAG_IS_SYS_DB(ctx->flag)) { + ctgDebug("will refresh tbmeta, supposed in information_schema, tbName:%s", tNameGetTableName(ctx->pName)); - CTG_ERR_JRET(ctgGetTableMetaFromMnodeImpl(pCtg, pTrans, pMgmtEps, (char *)pTableName->dbname, - (char *)pTableName->tname, output)); - } else if (CTG_FLAG_IS_STB(flag)) { - ctgDebug("will refresh tbmeta, supposed to be stb, tbName:%s", tNameGetTableName(pTableName)); + CTG_ERR_JRET(ctgGetTbMetaFromMnodeImpl(CTG_PARAMS_LIST(), (char *)ctx->pName->dbname, (char *)ctx->pName->tname, output, NULL)); + } else if (CTG_FLAG_IS_STB(ctx->flag)) { + ctgDebug("will refresh tbmeta, supposed to be stb, tbName:%s", tNameGetTableName(ctx->pName)); // if get from mnode failed, will not try vnode - CTG_ERR_JRET(ctgGetTableMetaFromMnode(pCtg, pTrans, pMgmtEps, pTableName, output)); + CTG_ERR_JRET(ctgGetTbMetaFromMnode(CTG_PARAMS_LIST(), ctx->pName, output, NULL)); if (CTG_IS_META_NULL(output->metaType)) { - CTG_ERR_JRET(ctgGetTableMetaFromVnode(pCtg, pTrans, pMgmtEps, pTableName, &vgroupInfo, output)); + CTG_ERR_JRET(ctgGetTbMetaFromVnode(CTG_PARAMS_LIST(), ctx->pName, &vgroupInfo, output, NULL)); } } else { - ctgDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pTableName), flag); + ctgDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(ctx->pName), ctx->flag); // if get from vnode failed or no table meta, will not try mnode - CTG_ERR_JRET(ctgGetTableMetaFromVnode(pCtg, pTrans, pMgmtEps, pTableName, &vgroupInfo, output)); + CTG_ERR_JRET(ctgGetTbMetaFromVnode(CTG_PARAMS_LIST(), ctx->pName, &vgroupInfo, output, NULL)); if (CTG_IS_META_TABLE(output->metaType) && TSDB_SUPER_TABLE == output->tbMeta->tableType) { - ctgDebug("will continue to refresh tbmeta since got stb, tbName:%s", tNameGetTableName(pTableName)); + ctgDebug("will continue to refresh tbmeta since got stb, tbName:%s", tNameGetTableName(ctx->pName)); taosMemoryFreeClear(output->tbMeta); - - CTG_ERR_JRET(ctgGetTableMetaFromMnodeImpl(pCtg, pTrans, pMgmtEps, output->dbFName, output->tbName, output)); + + CTG_ERR_JRET(ctgGetTbMetaFromMnodeImpl(CTG_PARAMS_LIST(), output->dbFName, output->tbName, output, NULL)); } else if (CTG_IS_META_BOTH(output->metaType)) { int32_t exist = 0; - if (!CTG_FLAG_IS_FORCE_UPDATE(flag)) { - CTG_ERR_JRET(ctgIsTableMetaExistInCache(pCtg, output->dbFName, output->tbName, &exist)); + if (!CTG_FLAG_IS_FORCE_UPDATE(ctx->flag)) { + CTG_ERR_JRET(ctgTbMetaExistInCache(pCtg, output->dbFName, output->tbName, &exist)); } if (0 == exist) { - CTG_ERR_JRET(ctgGetTableMetaFromMnodeImpl(pCtg, pTrans, pMgmtEps, output->dbFName, output->tbName, &moutput)); + CTG_ERR_JRET(ctgGetTbMetaFromMnodeImpl(CTG_PARAMS_LIST(), output->dbFName, output->tbName, &moutput, NULL)); if (CTG_IS_META_NULL(moutput.metaType)) { SET_META_TYPE_NULL(output->metaType); } - + taosMemoryFreeClear(output->tbMeta); output->tbMeta = moutput.tbMeta; moutput.tbMeta = NULL; } else { taosMemoryFreeClear(output->tbMeta); - - 
SET_META_TYPE_CTABLE(output->metaType); + + SET_META_TYPE_CTABLE(output->metaType); } } } if (CTG_IS_META_NULL(output->metaType)) { - ctgError("no tbmeta got, tbNmae:%s", tNameGetTableName(pTableName)); - catalogRemoveTableMeta(pCtg, pTableName); + ctgError("no tbmeta got, tbName:%s", tNameGetTableName(ctx->pName)); + ctgRemoveTbMetaFromCache(pCtg, ctx->pName, false); CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST); } if (CTG_IS_META_TABLE(output->metaType)) { - ctgDebug("tbmeta got, dbFName:%s, tbName:%s, tbType:%d", output->dbFName, output->tbName, - output->tbMeta->tableType); + ctgDebug("tbmeta got, dbFName:%s, tbName:%s, tbType:%d", output->dbFName, output->tbName, output->tbMeta->tableType); } else { - ctgDebug("tbmeta got, dbFName:%s, tbName:%s, tbType:%d, stbMetaGot:%d", output->dbFName, output->ctbName, - output->ctbMeta.tableType, CTG_IS_META_BOTH(output->metaType)); + ctgDebug("tbmeta got, dbFName:%s, tbName:%s, tbType:%d, stbMetaGot:%d", output->dbFName, output->ctbName, output->ctbMeta.tableType, CTG_IS_META_BOTH(output->metaType)); } if (pOutput) { CTG_ERR_JRET(ctgCloneMetaOutput(output, pOutput)); } - CTG_ERR_JRET(ctgPushUpdateTblMsgInQueue(pCtg, output, syncReq)); + CTG_ERR_JRET(ctgUpdateTbMetaEnqueue(pCtg, output, syncReq)); return TSDB_CODE_SUCCESS; @@ -1969,463 +209,170 @@ _return: taosMemoryFreeClear(output->tbMeta); taosMemoryFreeClear(output); - + CTG_RET(code); } -int32_t ctgGetTableMeta(SCatalog *pCtg, void *pRpc, const SEpSet *pMgmtEps, const SName *pTableName, - STableMeta **pTableMeta, int32_t flag) { - if (NULL == pCtg || NULL == pRpc || NULL == pMgmtEps || NULL == pTableName || NULL == pTableMeta) { - CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT); - } - - bool inCache = false; - int32_t code = 0; - uint64_t dbId = 0; - uint64_t suid = 0; - STableMetaOutput *output = NULL; - - if (CTG_IS_SYS_DBNAME(pTableName->dbname)) { - CTG_FLAG_SET_SYS_DB(flag); - } - - CTG_ERR_RET(ctgGetTableMetaFromCache(pCtg, pTableName, pTableMeta, &inCache, flag, &dbId)); - - int32_t tbType = 0; - - if (inCache) { - if (CTG_FLAG_MATCH_STB(flag, (*pTableMeta)->tableType) && - ((!CTG_FLAG_IS_FORCE_UPDATE(flag)) || (CTG_FLAG_IS_SYS_DB(flag)))) { - goto _return; - } - - tbType = (*pTableMeta)->tableType; - suid = (*pTableMeta)->suid; - - taosMemoryFreeClear(*pTableMeta); - } - - if (CTG_FLAG_IS_UNKNOWN_STB(flag)) { - CTG_FLAG_SET_STB(flag, tbType); - } - - while (true) { - CTG_ERR_JRET(ctgRefreshTblMeta(pCtg, pRpc, pMgmtEps, pTableName, flag, &output, false)); - - if (CTG_IS_META_TABLE(output->metaType)) { - *pTableMeta = output->tbMeta; - goto _return; - } - - if (CTG_IS_META_BOTH(output->metaType)) { - memcpy(output->tbMeta, &output->ctbMeta, sizeof(output->ctbMeta)); - - *pTableMeta = output->tbMeta; - goto _return; - } - - if ((!CTG_IS_META_CTABLE(output->metaType)) || output->tbMeta) { - ctgError("invalid metaType:%d", output->metaType); - taosMemoryFreeClear(output->tbMeta); - CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); - } - - // HANDLE ONLY CHILD TABLE META - - SName stbName = *pTableName; - strcpy(stbName.tname, output->tbName); - - taosMemoryFreeClear(output->tbMeta); - - CTG_ERR_JRET(ctgGetTableMetaFromCache(pCtg, &stbName, pTableMeta, &inCache, flag, NULL)); - if (!inCache) { - ctgDebug("stb no longer exist, dbFName:%s, tbName:%s", output->dbFName, pTableName->tname); - - continue; - } - - memcpy(*pTableMeta, &output->ctbMeta, sizeof(output->ctbMeta)); - - break; - } - -_return: - - if (CTG_TABLE_NOT_EXIST(code) && inCache) { - char dbFName[TSDB_DB_FNAME_LEN] = {0}; - if 
(CTG_FLAG_IS_SYS_DB(flag)) { - strcpy(dbFName, pTableName->dbname); - } else { - tNameGetFullDbName(pTableName, dbFName); - } - - if (TSDB_SUPER_TABLE == tbType) { - ctgPushRmStbMsgInQueue(pCtg, dbFName, dbId, pTableName->tname, suid, false); - } else { - ctgPushRmTblMsgInQueue(pCtg, dbFName, dbId, pTableName->tname, false); - } +int32_t ctgGetTbMetaFromCache(CTG_PARAMS, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta) { + if (CTG_IS_SYS_DBNAME(ctx->pName->dbname)) { + CTG_FLAG_SET_SYS_DB(ctx->flag); } - taosMemoryFreeClear(output); + CTG_ERR_RET(ctgReadTbMetaFromCache(pCtg, ctx, pTableMeta)); if (*pTableMeta) { - ctgDebug("tbmeta returned, tbName:%s, tbType:%d", pTableName->tname, (*pTableMeta)->tableType); - ctgdShowTableMeta(pCtg, pTableName->tname, *pTableMeta); - } - - CTG_RET(code); -} - -int32_t ctgChkAuth(SCatalog *pCtg, void *pRpc, const SEpSet *pMgmtEps, const char *user, const char *dbFName, - AUTH_TYPE type, bool *pass) { - bool inCache = false; - int32_t code = 0; - - *pass = false; - - CTG_ERR_RET(ctgChkAuthFromCache(pCtg, user, dbFName, type, &inCache, pass)); - - if (inCache) { - return TSDB_CODE_SUCCESS; - } - - SGetUserAuthRsp authRsp = {0}; - CTG_ERR_RET(ctgGetUserDbAuthFromMnode(pCtg, pRpc, pMgmtEps, user, &authRsp)); - - if (authRsp.superAuth) { - *pass = true; - goto _return; - } - - if (authRsp.createdDbs && taosHashGet(authRsp.createdDbs, dbFName, strlen(dbFName))) { - *pass = true; - goto _return; - } - - if (authRsp.readDbs && taosHashGet(authRsp.readDbs, dbFName, strlen(dbFName)) && type == AUTH_TYPE_READ) { - *pass = true; - } - - if (authRsp.writeDbs && taosHashGet(authRsp.writeDbs, dbFName, strlen(dbFName)) && type == AUTH_TYPE_WRITE) { - *pass = true; - } - -_return: - - ctgPushUpdateUserMsgInQueue(pCtg, &authRsp, false); - - return TSDB_CODE_SUCCESS; -} - -int32_t ctgActUpdateVg(SCtgMetaAction *action) { - int32_t code = 0; - SCtgUpdateVgMsg *msg = action->data; - - CTG_ERR_JRET(ctgUpdateDBVgInfo(msg->pCtg, msg->dbFName, msg->dbId, &msg->dbInfo)); - -_return: - - ctgFreeVgInfo(msg->dbInfo); - taosMemoryFreeClear(msg); - - CTG_RET(code); -} - -int32_t ctgActRemoveDB(SCtgMetaAction *action) { - int32_t code = 0; - SCtgRemoveDBMsg *msg = action->data; - SCatalog *pCtg = msg->pCtg; - - SCtgDBCache *dbCache = NULL; - ctgGetDBCache(msg->pCtg, msg->dbFName, &dbCache); - if (NULL == dbCache) { - goto _return; - } - - if (dbCache->dbId != msg->dbId) { - ctgInfo("dbId already updated, dbFName:%s, dbId:%" PRIx64 ", targetId:%" PRIx64, msg->dbFName, dbCache->dbId, - msg->dbId); - goto _return; - } - - CTG_ERR_JRET(ctgRemoveDB(pCtg, dbCache, msg->dbFName)); - -_return: - - taosMemoryFreeClear(msg); - - CTG_RET(code); -} - -int32_t ctgActUpdateTbl(SCtgMetaAction *action) { - int32_t code = 0; - SCtgUpdateTblMsg *msg = action->data; - SCatalog *pCtg = msg->pCtg; - STableMetaOutput *output = msg->output; - SCtgDBCache *dbCache = NULL; - - if ((!CTG_IS_META_CTABLE(output->metaType)) && NULL == output->tbMeta) { - ctgError("no valid tbmeta got from meta rsp, dbFName:%s, tbName:%s", output->dbFName, output->tbName); - CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); - } - - if (CTG_IS_META_BOTH(output->metaType) && TSDB_SUPER_TABLE != output->tbMeta->tableType) { - ctgError("table type error, expected:%d, actual:%d", TSDB_SUPER_TABLE, output->tbMeta->tableType); - CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); - } - - CTG_ERR_JRET(ctgGetAddDBCache(pCtg, output->dbFName, output->dbId, &dbCache)); - if (NULL == dbCache) { - ctgInfo("conflict db update, ignore this update, dbFName:%s, 
dbId:%" PRIx64, output->dbFName, output->dbId); - CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); - } - - if (CTG_IS_META_TABLE(output->metaType) || CTG_IS_META_BOTH(output->metaType)) { - int32_t metaSize = CTG_META_SIZE(output->tbMeta); - - CTG_ERR_JRET( - ctgUpdateTblMeta(pCtg, dbCache, output->dbFName, output->dbId, output->tbName, output->tbMeta, metaSize)); - } - - if (CTG_IS_META_CTABLE(output->metaType) || CTG_IS_META_BOTH(output->metaType)) { - CTG_ERR_JRET(ctgUpdateTblMeta(pCtg, dbCache, output->dbFName, output->dbId, output->ctbName, - (STableMeta *)&output->ctbMeta, sizeof(output->ctbMeta))); - } - -_return: - - if (output) { - taosMemoryFreeClear(output->tbMeta); - taosMemoryFreeClear(output); - } - - taosMemoryFreeClear(msg); - - CTG_RET(code); -} - -int32_t ctgActRemoveStb(SCtgMetaAction *action) { - int32_t code = 0; - SCtgRemoveStbMsg *msg = action->data; - SCatalog *pCtg = msg->pCtg; - - SCtgDBCache *dbCache = NULL; - ctgGetDBCache(pCtg, msg->dbFName, &dbCache); - if (NULL == dbCache) { - return TSDB_CODE_SUCCESS; - } - - if (msg->dbId && (dbCache->dbId != msg->dbId)) { - ctgDebug("dbId already modified, dbFName:%s, current:%" PRIx64 ", dbId:%" PRIx64 ", stb:%s, suid:%" PRIx64, - msg->dbFName, dbCache->dbId, msg->dbId, msg->stbName, msg->suid); - return TSDB_CODE_SUCCESS; - } - - CTG_LOCK(CTG_WRITE, &dbCache->tbCache.stbLock); - if (taosHashRemove(dbCache->tbCache.stbCache, &msg->suid, sizeof(msg->suid))) { - ctgDebug("stb not exist in stbCache, may be removed, dbFName:%s, stb:%s, suid:%" PRIx64, msg->dbFName, msg->stbName, - msg->suid); - } else { - CTG_CACHE_STAT_SUB(stblNum, 1); - } - - CTG_LOCK(CTG_READ, &dbCache->tbCache.metaLock); - if (taosHashRemove(dbCache->tbCache.metaCache, msg->stbName, strlen(msg->stbName))) { - ctgError("stb not exist in cache, dbFName:%s, stb:%s, suid:%" PRIx64, msg->dbFName, msg->stbName, msg->suid); - } else { - CTG_CACHE_STAT_SUB(tblNum, 1); - } - CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock); - - CTG_UNLOCK(CTG_WRITE, &dbCache->tbCache.stbLock); - - ctgInfo("stb removed from cache, dbFName:%s, stbName:%s, suid:%" PRIx64, msg->dbFName, msg->stbName, msg->suid); - - CTG_ERR_JRET(ctgMetaRentRemove(&msg->pCtg->stbRent, msg->suid, ctgStbVersionSortCompare, ctgStbVersionSearchCompare)); - - ctgDebug("stb removed from rent, dbFName:%s, stbName:%s, suid:%" PRIx64, msg->dbFName, msg->stbName, msg->suid); - -_return: - - taosMemoryFreeClear(msg); - - CTG_RET(code); -} - -int32_t ctgActRemoveTbl(SCtgMetaAction *action) { - int32_t code = 0; - SCtgRemoveTblMsg *msg = action->data; - SCatalog *pCtg = msg->pCtg; - - SCtgDBCache *dbCache = NULL; - ctgGetDBCache(pCtg, msg->dbFName, &dbCache); - if (NULL == dbCache) { - return TSDB_CODE_SUCCESS; - } - - if (dbCache->dbId != msg->dbId) { - ctgDebug("dbId already modified, dbFName:%s, current:%" PRIx64 ", dbId:%" PRIx64 ", tbName:%s", msg->dbFName, - dbCache->dbId, msg->dbId, msg->tbName); - return TSDB_CODE_SUCCESS; - } - - CTG_LOCK(CTG_READ, &dbCache->tbCache.metaLock); - if (taosHashRemove(dbCache->tbCache.metaCache, msg->tbName, strlen(msg->tbName))) { - CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock); - ctgError("stb not exist in cache, dbFName:%s, tbName:%s", msg->dbFName, msg->tbName); - CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR); - } else { - CTG_CACHE_STAT_SUB(tblNum, 1); - } - CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock); - - ctgInfo("table removed from cache, dbFName:%s, tbName:%s", msg->dbFName, msg->tbName); - -_return: - - taosMemoryFreeClear(msg); - - CTG_RET(code); -} - -int32_t 
ctgActUpdateUser(SCtgMetaAction *action) { - int32_t code = 0; - SCtgUpdateUserMsg *msg = action->data; - SCatalog *pCtg = msg->pCtg; - - if (NULL == pCtg->userCache) { - pCtg->userCache = taosHashInit(gCtgMgmt.cfg.maxUserCacheNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), - false, HASH_ENTRY_LOCK); - if (NULL == pCtg->userCache) { - ctgError("taosHashInit %d user cache failed", gCtgMgmt.cfg.maxUserCacheNum); - CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); - } - } - - SCtgUserAuth *pUser = (SCtgUserAuth *)taosHashGet(pCtg->userCache, msg->userAuth.user, strlen(msg->userAuth.user)); - if (NULL == pUser) { - SCtgUserAuth userAuth = {0}; - - userAuth.version = msg->userAuth.version; - userAuth.superUser = msg->userAuth.superAuth; - userAuth.createdDbs = msg->userAuth.createdDbs; - userAuth.readDbs = msg->userAuth.readDbs; - userAuth.writeDbs = msg->userAuth.writeDbs; - - if (taosHashPut(pCtg->userCache, msg->userAuth.user, sizeof(msg->userAuth.user), &userAuth, sizeof(userAuth))) { - ctgError("taosHashPut user %s to cache failed", msg->userAuth.user); - CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); + if (CTG_FLAG_MATCH_STB(ctx->flag, (*pTableMeta)->tableType) && ((!CTG_FLAG_IS_FORCE_UPDATE(ctx->flag)) || (CTG_FLAG_IS_SYS_DB(ctx->flag)))) { + return TSDB_CODE_SUCCESS; } - taosMemoryFreeClear(msg); + taosMemoryFreeClear(*pTableMeta); + } - return TSDB_CODE_SUCCESS; + if (CTG_FLAG_IS_UNKNOWN_STB(ctx->flag)) { + CTG_FLAG_SET_STB(ctx->flag, ctx->tbInfo.tbType); } + + return TSDB_CODE_SUCCESS; +} - pUser->version = msg->userAuth.version; - CTG_LOCK(CTG_WRITE, &pUser->lock); +int32_t ctgGetTbMeta(CTG_PARAMS, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta) { + int32_t code = 0; + STableMetaOutput *output = NULL; - taosHashCleanup(pUser->createdDbs); - pUser->createdDbs = msg->userAuth.createdDbs; - msg->userAuth.createdDbs = NULL; + CTG_ERR_RET(ctgGetTbMetaFromCache(CTG_PARAMS_LIST(), ctx, pTableMeta)); + if (*pTableMeta) { + goto _return; + } - taosHashCleanup(pUser->readDbs); - pUser->readDbs = msg->userAuth.readDbs; - msg->userAuth.readDbs = NULL; + while (true) { + CTG_ERR_JRET(ctgRefreshTbMeta(CTG_PARAMS_LIST(), ctx, &output, false)); - taosHashCleanup(pUser->writeDbs); - pUser->writeDbs = msg->userAuth.writeDbs; - msg->userAuth.writeDbs = NULL; + if (CTG_IS_META_TABLE(output->metaType)) { + *pTableMeta = output->tbMeta; + goto _return; + } - CTG_UNLOCK(CTG_WRITE, &pUser->lock); + if (CTG_IS_META_BOTH(output->metaType)) { + memcpy(output->tbMeta, &output->ctbMeta, sizeof(output->ctbMeta)); + + *pTableMeta = output->tbMeta; + goto _return; + } -_return: + if ((!CTG_IS_META_CTABLE(output->metaType)) || output->tbMeta) { + ctgError("invalid metaType:%d", output->metaType); + taosMemoryFreeClear(output->tbMeta); + CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); + } - taosHashCleanup(msg->userAuth.createdDbs); - taosHashCleanup(msg->userAuth.readDbs); - taosHashCleanup(msg->userAuth.writeDbs); + // HANDLE ONLY CHILD TABLE META - taosMemoryFreeClear(msg); + taosMemoryFreeClear(output->tbMeta); - CTG_RET(code); -} + SName stbName = *ctx->pName; + strcpy(stbName.tname, output->tbName); + SCtgTbMetaCtx stbCtx = {0}; + stbCtx.flag = ctx->flag; + stbCtx.pName = &stbName; + + CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &stbCtx, pTableMeta)); + if (NULL == *pTableMeta) { + ctgDebug("stb no longer exist, dbFName:%s, tbName:%s", output->dbFName, ctx->pName->tname); + continue; + } -void *ctgUpdateThreadFunc(void *param) { - setThreadName("catalog"); + memcpy(*pTableMeta, &output->ctbMeta, sizeof(output->ctbMeta)); - 
qInfo("catalog update thread started"); + break; + } - CTG_LOCK(CTG_READ, &gCtgMgmt.lock); +_return: - while (true) { - if (tsem_wait(&gCtgMgmt.queue.reqSem)) { - qError("ctg tsem_wait failed, error:%s", tstrerror(TAOS_SYSTEM_ERROR(errno))); + if (CTG_TABLE_NOT_EXIST(code) && ctx->tbInfo.inCache) { + char dbFName[TSDB_DB_FNAME_LEN] = {0}; + if (CTG_FLAG_IS_SYS_DB(ctx->flag)) { + strcpy(dbFName, ctx->pName->dbname); + } else { + tNameGetFullDbName(ctx->pName, dbFName); } - if (atomic_load_8((int8_t *)&gCtgMgmt.exit)) { - tsem_post(&gCtgMgmt.queue.rspSem); - break; + if (TSDB_SUPER_TABLE == ctx->tbInfo.tbType) { + ctgDropStbMetaEnqueue(pCtg, dbFName, ctx->tbInfo.dbId, ctx->pName->tname, ctx->tbInfo.suid, false); + } else { + ctgDropTbMetaEnqueue(pCtg, dbFName, ctx->tbInfo.dbId, ctx->pName->tname, false); } + } - SCtgMetaAction *action = NULL; - ctgPopAction(&action); - SCatalog *pCtg = ((SCtgUpdateMsgHeader *)action->data)->pCtg; - - ctgDebug("process [%s] action", gCtgAction[action->act].name); + taosMemoryFreeClear(output); - (*gCtgAction[action->act].func)(action); + if (*pTableMeta) { + ctgDebug("tbmeta returned, tbName:%s, tbType:%d", ctx->pName->tname, (*pTableMeta)->tableType); + ctgdShowTableMeta(pCtg, ctx->pName->tname, *pTableMeta); + } - gCtgMgmt.queue.seqDone = action->seqId; + CTG_RET(code); +} - if (action->syncReq) { - tsem_post(&gCtgMgmt.queue.rspSem); - } - CTG_RUNTIME_STAT_ADD(qDoneNum, 1); +int32_t ctgChkAuth(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const char* user, const char* dbFName, AUTH_TYPE type, bool *pass) { + bool inCache = false; + int32_t code = 0; + + *pass = false; + + CTG_ERR_RET(ctgChkAuthFromCache(pCtg, user, dbFName, type, &inCache, pass)); - ctgdShowClusterCache(pCtg); + if (inCache) { + return TSDB_CODE_SUCCESS; } - CTG_UNLOCK(CTG_READ, &gCtgMgmt.lock); + SGetUserAuthRsp authRsp = {0}; + CTG_ERR_RET(ctgGetUserDbAuthFromMnode(CTG_PARAMS_LIST(), user, &authRsp, NULL)); + + if (authRsp.superAuth) { + *pass = true; + goto _return; + } - qInfo("catalog update thread stopped"); + if (authRsp.createdDbs && taosHashGet(authRsp.createdDbs, dbFName, strlen(dbFName))) { + *pass = true; + goto _return; + } - return NULL; -} + if (type == AUTH_TYPE_READ && authRsp.readDbs && taosHashGet(authRsp.readDbs, dbFName, strlen(dbFName))) { + *pass = true; + } else if (type == AUTH_TYPE_WRITE && authRsp.writeDbs && taosHashGet(authRsp.writeDbs, dbFName, strlen(dbFName))) { + *pass = true; + } -int32_t ctgStartUpdateThread() { - TdThreadAttr thAttr; - taosThreadAttrInit(&thAttr); - taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE); +_return: - if (taosThreadCreate(&gCtgMgmt.updateThread, &thAttr, ctgUpdateThreadFunc, NULL) != 0) { - terrno = TAOS_SYSTEM_ERROR(errno); - CTG_ERR_RET(terrno); - } + ctgUpdateUserEnqueue(pCtg, &authRsp, false); - taosThreadAttrDestroy(&thAttr); return TSDB_CODE_SUCCESS; } -int32_t ctgGetTableDistVgInfo(SCatalog *pCtg, void *pRpc, const SEpSet *pMgmtEps, const SName *pTableName, - SArray **pVgList) { - STableMeta *tbMeta = NULL; - int32_t code = 0; - SVgroupInfo vgroupInfo = {0}; - SCtgDBCache *dbCache = NULL; - SArray *vgList = NULL; - SDBVgInfo *vgInfo = NULL; +int32_t ctgGetTbDistVgInfo(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, SName* pTableName, SArray** pVgList) { + STableMeta *tbMeta = NULL; + int32_t code = 0; + SVgroupInfo vgroupInfo = {0}; + SCtgDBCache* dbCache = NULL; + SArray *vgList = NULL; + SDBVgInfo *vgInfo = NULL; + SCtgTbMetaCtx ctx = {0}; + ctx.pName = pTableName; + ctx.flag = 
CTG_FLAG_UNKNOWN_STB; *pVgList = NULL; - - CTG_ERR_JRET(ctgGetTableMeta(pCtg, pRpc, pMgmtEps, pTableName, &tbMeta, CTG_FLAG_UNKNOWN_STB)); + + CTG_ERR_JRET(ctgGetTbMeta(CTG_PARAMS_LIST(), &ctx, &tbMeta)); char db[TSDB_DB_FNAME_LEN] = {0}; tNameGetFullDbName(pTableName, db); - SHashObj *vgHash = NULL; - CTG_ERR_JRET(ctgGetDBVgInfo(pCtg, pRpc, pMgmtEps, db, &dbCache, &vgInfo)); + SHashObj *vgHash = NULL; + CTG_ERR_JRET(ctgGetDBVgInfo(pCtg, pTrans, pMgmtEps, db, &dbCache, &vgInfo)); if (dbCache) { vgHash = dbCache->vgInfo->vgHash; @@ -2439,7 +386,7 @@ int32_t ctgGetTableDistVgInfo(SCatalog *pCtg, void *pRpc, const SEpSet *pMgmtEps // USE HASH METHOD INSTEAD OF VGID IN TBMETA ctgError("invalid method to get none stb vgInfo, tbType:%d", tbMeta->tableType); CTG_ERR_JRET(TSDB_CODE_CTG_INVALID_INPUT); - + #if 0 int32_t vgId = tbMeta->vgId; if (taosHashGetDup(vgHash, &vgId, sizeof(vgId), &vgroupInfo) != 0) { @@ -2460,7 +407,7 @@ int32_t ctgGetTableDistVgInfo(SCatalog *pCtg, void *pRpc, const SEpSet *pMgmtEps *pVgList = vgList; vgList = NULL; -#endif +#endif } _return: @@ -2491,7 +438,7 @@ int32_t catalogInit(SCatalogCfg *cfg) { CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT); } - atomic_store_8((int8_t *)&gCtgMgmt.exit, false); + atomic_store_8((int8_t*)&gCtgMgmt.exit, false); if (cfg) { memcpy(&gCtgMgmt.cfg, cfg, sizeof(*cfg)); @@ -2518,8 +465,7 @@ int32_t catalogInit(SCatalogCfg *cfg) { gCtgMgmt.cfg.stbRentSec = CTG_DEFAULT_RENT_SECOND; } - gCtgMgmt.pCluster = taosHashInit(CTG_DEFAULT_CACHE_CLUSTER_NUMBER, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), - false, HASH_ENTRY_LOCK); + gCtgMgmt.pCluster = taosHashInit(CTG_DEFAULT_CACHE_CLUSTER_NUMBER, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK); if (NULL == gCtgMgmt.pCluster) { qError("taosHashInit %d cluster cache failed", CTG_DEFAULT_CACHE_CLUSTER_NUMBER); CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR); @@ -2529,7 +475,7 @@ int32_t catalogInit(SCatalogCfg *cfg) { qError("tsem_init failed, error:%s", tstrerror(TAOS_SYSTEM_ERROR(errno))); CTG_ERR_RET(TSDB_CODE_CTG_SYS_ERROR); } - + if (tsem_init(&gCtgMgmt.queue.rspSem, 0, 0)) { qError("tsem_init failed, error:%s", tstrerror(TAOS_SYSTEM_ERROR(errno))); CTG_ERR_RET(TSDB_CODE_CTG_SYS_ERROR); @@ -2542,33 +488,38 @@ int32_t catalogInit(SCatalogCfg *cfg) { } gCtgMgmt.queue.tail = gCtgMgmt.queue.head; + gCtgMgmt.jobPool = taosOpenRef(200, ctgFreeJob); + if (gCtgMgmt.jobPool < 0) { + qError("taosOpenRef failed, error:%s", tstrerror(terrno)); + CTG_ERR_RET(terrno); + } + CTG_ERR_RET(ctgStartUpdateThread()); - qDebug("catalog initialized, maxDb:%u, maxTbl:%u, dbRentSec:%u, stbRentSec:%u", gCtgMgmt.cfg.maxDBCacheNum, - gCtgMgmt.cfg.maxTblCacheNum, gCtgMgmt.cfg.dbRentSec, gCtgMgmt.cfg.stbRentSec); + qDebug("catalog initialized, maxDb:%u, maxTbl:%u, dbRentSec:%u, stbRentSec:%u", gCtgMgmt.cfg.maxDBCacheNum, gCtgMgmt.cfg.maxTblCacheNum, gCtgMgmt.cfg.dbRentSec, gCtgMgmt.cfg.stbRentSec); return TSDB_CODE_SUCCESS; } -int32_t catalogGetHandle(uint64_t clusterId, SCatalog **catalogHandle) { +int32_t catalogGetHandle(uint64_t clusterId, SCatalog** catalogHandle) { if (NULL == catalogHandle) { CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT); } if (NULL == gCtgMgmt.pCluster) { - qError("catalog cluster cache are not ready, clusterId:%" PRIx64, clusterId); + qError("catalog cluster cache are not ready, clusterId:%"PRIx64, clusterId); CTG_ERR_RET(TSDB_CODE_CTG_NOT_READY); } - int32_t code = 0; + int32_t code = 0; SCatalog *clusterCtg = NULL; while (true) { - SCatalog **ctg = (SCatalog 
**ctg = (SCatalog **)taosHashGet(gCtgMgmt.pCluster, (char *)&clusterId, sizeof(clusterId));
+    SCatalog **ctg = (SCatalog **)taosHashGet(gCtgMgmt.pCluster, (char*)&clusterId, sizeof(clusterId));
     if (ctg && (*ctg)) {
       *catalogHandle = *ctg;
-      qDebug("got catalog handle from cache, clusterId:%" PRIx64 ", CTG:%p", clusterId, *ctg);
+      qDebug("got catalog handle from cache, clusterId:%"PRIx64", CTG:%p", clusterId, *ctg);
       return TSDB_CODE_SUCCESS;
     }
@@ -2583,8 +534,7 @@ int32_t catalogGetHandle(uint64_t clusterId, SCatalog **catalogHandle) {
     CTG_ERR_JRET(ctgMetaRentInit(&clusterCtg->dbRent, gCtgMgmt.cfg.dbRentSec, CTG_RENT_DB));
     CTG_ERR_JRET(ctgMetaRentInit(&clusterCtg->stbRent, gCtgMgmt.cfg.stbRentSec, CTG_RENT_STABLE));
 
-    clusterCtg->dbCache = taosHashInit(gCtgMgmt.cfg.maxDBCacheNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY),
-                                       false, HASH_ENTRY_LOCK);
+    clusterCtg->dbCache = taosHashInit(gCtgMgmt.cfg.maxDBCacheNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK);
     if (NULL == clusterCtg->dbCache) {
       qError("taosHashInit %d dbCache failed", CTG_DEFAULT_CACHE_DB_NUMBER);
       CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR);
@@ -2596,12 +546,12 @@ int32_t catalogGetHandle(uint64_t clusterId, SCatalog **catalogHandle) {
         ctgFreeHandle(clusterCtg);
         continue;
       }
-
-      qError("taosHashPut CTG to cache failed, clusterId:%" PRIx64, clusterId);
+      
+      qError("taosHashPut CTG to cache failed, clusterId:%"PRIx64, clusterId);
       CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
     }
 
-    qDebug("add CTG to cache, clusterId:%" PRIx64 ", CTG:%p", clusterId, clusterCtg);
+    qDebug("add CTG to cache, clusterId:%"PRIx64", CTG:%p", clusterId, clusterCtg);
 
     break;
   }
@@ -2609,36 +559,36 @@ int32_t catalogGetHandle(uint64_t clusterId, SCatalog **catalogHandle) {
   *catalogHandle = clusterCtg;
 
   CTG_CACHE_STAT_ADD(clusterNum, 1);
-
+  
   return TSDB_CODE_SUCCESS;
 
 _return:
 
   ctgFreeHandle(clusterCtg);
-
+  
   CTG_RET(code);
 }
 
-void catalogFreeHandle(SCatalog *pCtg) {
+void catalogFreeHandle(SCatalog* pCtg) {
   if (NULL == pCtg) {
     return;
   }
 
   if (taosHashRemove(gCtgMgmt.pCluster, &pCtg->clusterId, sizeof(pCtg->clusterId))) {
-    ctgWarn("taosHashRemove from cluster failed, may already be freed, clusterId:%" PRIx64, pCtg->clusterId);
+    ctgWarn("taosHashRemove from cluster failed, may already be freed, clusterId:%"PRIx64, pCtg->clusterId);
     return;
   }
 
   CTG_CACHE_STAT_SUB(clusterNum, 1);
 
   uint64_t clusterId = pCtg->clusterId;
-
+  
   ctgFreeHandle(pCtg);
-
-  ctgInfo("handle freed, culsterId:%" PRIx64, clusterId);
+  
+  ctgInfo("handle freed, clusterId:%"PRIx64, clusterId);
 }
 
-int32_t catalogGetDBVgVersion(SCatalog *pCtg, const char *dbFName, int32_t *version, int64_t *dbId, int32_t *tableNum) {
+int32_t catalogGetDBVgVersion(SCatalog* pCtg, const char* dbFName, int32_t* version, int64_t* dbId, int32_t *tableNum) {
   CTG_API_ENTER();
 
   if (NULL == pCtg || NULL == dbFName || NULL == version || NULL == dbId) {
@@ -2646,11 +596,10 @@ int32_t catalogGetDBVgVersion(SCatalog *pCtg, const char *dbFName, int32_t *vers
   }
 
   SCtgDBCache *dbCache = NULL;
-  bool         inCache = false;
-  int32_t      code = 0;
+  int32_t code = 0;
 
-  CTG_ERR_JRET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache, &inCache));
-  if (!inCache) {
+  CTG_ERR_JRET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache));
+  if (NULL == dbCache) {
     *version = CTG_DEFAULT_INVALID_VERSION;
     CTG_API_LEAVE(TSDB_CODE_SUCCESS);
   }
@@ -2671,20 +620,19 @@ _return:
 
   CTG_API_LEAVE(code);
 }
 
-int32_t catalogGetDBVgInfo(SCatalog *pCtg, void *pRpc, const SEpSet *pMgmtEps, const char *dbFName,
-                           SArray **vgroupList) {
+int32_t catalogGetDBVgInfo(SCatalog* 
pCtg, void *pTrans, const SEpSet* pMgmtEps, const char* dbFName, SArray** vgroupList) { CTG_API_ENTER(); - if (NULL == pCtg || NULL == dbFName || NULL == pRpc || NULL == pMgmtEps || NULL == vgroupList) { + if (NULL == pCtg || NULL == dbFName || NULL == pTrans || NULL == pMgmtEps || NULL == vgroupList) { CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT); } - SCtgDBCache *dbCache = NULL; - int32_t code = 0; - SArray *vgList = NULL; - SHashObj *vgHash = NULL; - SDBVgInfo *vgInfo = NULL; - CTG_ERR_JRET(ctgGetDBVgInfo(pCtg, pRpc, pMgmtEps, dbFName, &dbCache, &vgInfo)); + SCtgDBCache* dbCache = NULL; + int32_t code = 0; + SArray *vgList = NULL; + SHashObj *vgHash = NULL; + SDBVgInfo *vgInfo = NULL; + CTG_ERR_JRET(ctgGetDBVgInfo(pCtg, pTrans, pMgmtEps, dbFName, &dbCache, &vgInfo)); if (dbCache) { vgHash = dbCache->vgInfo->vgHash; } else { @@ -2708,31 +656,33 @@ _return: taosMemoryFreeClear(vgInfo); } - CTG_API_LEAVE(code); + CTG_API_LEAVE(code); } -int32_t catalogUpdateDBVgInfo(SCatalog *pCtg, const char *dbFName, uint64_t dbId, SDBVgInfo *dbInfo) { + +int32_t catalogUpdateDBVgInfo(SCatalog* pCtg, const char* dbFName, uint64_t dbId, SDBVgInfo* dbInfo) { CTG_API_ENTER(); int32_t code = 0; - + if (NULL == pCtg || NULL == dbFName || NULL == dbInfo) { ctgFreeVgInfo(dbInfo); CTG_ERR_JRET(TSDB_CODE_CTG_INVALID_INPUT); } - code = ctgPushUpdateVgMsgInQueue(pCtg, dbFName, dbId, dbInfo, false); + code = ctgUpdateVgroupEnqueue(pCtg, dbFName, dbId, dbInfo, false); _return: CTG_API_LEAVE(code); } -int32_t catalogRemoveDB(SCatalog *pCtg, const char *dbFName, uint64_t dbId) { + +int32_t catalogRemoveDB(SCatalog* pCtg, const char* dbFName, uint64_t dbId) { CTG_API_ENTER(); int32_t code = 0; - + if (NULL == pCtg || NULL == dbFName) { CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT); } @@ -2741,22 +691,36 @@ int32_t catalogRemoveDB(SCatalog *pCtg, const char *dbFName, uint64_t dbId) { CTG_API_LEAVE(TSDB_CODE_SUCCESS); } - CTG_ERR_JRET(ctgPushRmDBMsgInQueue(pCtg, dbFName, dbId)); + CTG_ERR_JRET(ctgDropDbCacheEnqueue(pCtg, dbFName, dbId)); CTG_API_LEAVE(TSDB_CODE_SUCCESS); - + _return: CTG_API_LEAVE(code); } -int32_t catalogUpdateVgEpSet(SCatalog *pCtg, const char *dbFName, int32_t vgId, SEpSet *epSet) { return 0; } - -int32_t catalogRemoveTableMeta(SCatalog *pCtg, const SName *pTableName) { +int32_t catalogUpdateVgEpSet(SCatalog* pCtg, const char* dbFName, int32_t vgId, SEpSet *epSet) { CTG_API_ENTER(); int32_t code = 0; + + if (NULL == pCtg || NULL == dbFName || NULL == epSet) { + CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT); + } + + CTG_ERR_JRET(ctgUpdateVgEpsetEnqueue(pCtg, (char*)dbFName, vgId, epSet)); + +_return: + + CTG_API_LEAVE(code); +} + +int32_t catalogRemoveTableMeta(SCatalog* pCtg, SName* pTableName) { + CTG_API_ENTER(); + int32_t code = 0; + if (NULL == pCtg || NULL == pTableName) { CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT); } @@ -2765,39 +729,19 @@ int32_t catalogRemoveTableMeta(SCatalog *pCtg, const SName *pTableName) { CTG_API_LEAVE(TSDB_CODE_SUCCESS); } - STableMeta *tblMeta = NULL; - bool inCache = false; - uint64_t dbId = 0; - CTG_ERR_JRET(ctgGetTableMetaFromCache(pCtg, pTableName, &tblMeta, &inCache, 0, &dbId)); - - if (!inCache) { - ctgDebug("table already not in cache, db:%s, tblName:%s", pTableName->dbname, pTableName->tname); - goto _return; - } - - char dbFName[TSDB_DB_FNAME_LEN]; - tNameGetFullDbName(pTableName, dbFName); - - if (TSDB_SUPER_TABLE == tblMeta->tableType) { - CTG_ERR_JRET(ctgPushRmStbMsgInQueue(pCtg, dbFName, dbId, pTableName->tname, tblMeta->suid, true)); - } else { - 
CTG_ERR_JRET(ctgPushRmTblMsgInQueue(pCtg, dbFName, dbId, pTableName->tname, true)); - } + CTG_ERR_JRET(ctgRemoveTbMetaFromCache(pCtg, pTableName, true)); - ctgDebug("table meta %s.%s removed", dbFName, pTableName->tname); - _return: - - taosMemoryFreeClear(tblMeta); - + CTG_API_LEAVE(code); } -int32_t catalogRemoveStbMeta(SCatalog *pCtg, const char *dbFName, uint64_t dbId, const char *stbName, uint64_t suid) { + +int32_t catalogRemoveStbMeta(SCatalog* pCtg, const char* dbFName, uint64_t dbId, const char* stbName, uint64_t suid) { CTG_API_ENTER(); int32_t code = 0; - + if (NULL == pCtg || NULL == dbFName || NULL == stbName) { CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT); } @@ -2806,35 +750,36 @@ int32_t catalogRemoveStbMeta(SCatalog *pCtg, const char *dbFName, uint64_t dbId, CTG_API_LEAVE(TSDB_CODE_SUCCESS); } - CTG_ERR_JRET(ctgPushRmStbMsgInQueue(pCtg, dbFName, dbId, stbName, suid, true)); + CTG_ERR_JRET(ctgDropStbMetaEnqueue(pCtg, dbFName, dbId, stbName, suid, true)); CTG_API_LEAVE(TSDB_CODE_SUCCESS); - + _return: CTG_API_LEAVE(code); } -int32_t catalogGetIndexMeta(SCatalog *pCtg, void *pTrans, const SEpSet *pMgmtEps, const SName *pTableName, - const char *pIndexName, SIndexMeta **pIndexMeta) { - return 0; -} - -int32_t catalogGetTableMeta(SCatalog *pCtg, void *pTrans, const SEpSet *pMgmtEps, const SName *pTableName, - STableMeta **pTableMeta) { +int32_t catalogGetTableMeta(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const SName* pTableName, STableMeta** pTableMeta) { CTG_API_ENTER(); - CTG_API_LEAVE(ctgGetTableMeta(pCtg, pTrans, pMgmtEps, pTableName, pTableMeta, CTG_FLAG_UNKNOWN_STB)); + SCtgTbMetaCtx ctx = {0}; + ctx.pName = (SName*)pTableName; + ctx.flag = CTG_FLAG_UNKNOWN_STB; + + CTG_API_LEAVE(ctgGetTbMeta(pCtg, pTrans, pMgmtEps, &ctx, pTableMeta)); } -int32_t catalogGetSTableMeta(SCatalog *pCtg, void *pTrans, const SEpSet *pMgmtEps, const SName *pTableName, - STableMeta **pTableMeta) { +int32_t catalogGetSTableMeta(SCatalog* pCtg, void * pTrans, const SEpSet* pMgmtEps, const SName* pTableName, STableMeta** pTableMeta) { CTG_API_ENTER(); - CTG_API_LEAVE(ctgGetTableMeta(pCtg, pTrans, pMgmtEps, pTableName, pTableMeta, CTG_FLAG_STB)); + SCtgTbMetaCtx ctx = {0}; + ctx.pName = (SName*)pTableName; + ctx.flag = CTG_FLAG_STB; + + CTG_API_LEAVE(ctgGetTbMeta(CTG_PARAMS_LIST(), &ctx, pTableMeta)); } -int32_t catalogUpdateSTableMeta(SCatalog *pCtg, STableMetaRsp *rspMsg) { +int32_t catalogUpdateSTableMeta(SCatalog* pCtg, STableMetaRsp *rspMsg) { CTG_API_ENTER(); if (NULL == pCtg || NULL == rspMsg) { @@ -2846,118 +791,40 @@ int32_t catalogUpdateSTableMeta(SCatalog *pCtg, STableMetaRsp *rspMsg) { ctgError("malloc %d failed", (int32_t)sizeof(STableMetaOutput)); CTG_API_LEAVE(TSDB_CODE_CTG_MEM_ERROR); } - + int32_t code = 0; strcpy(output->dbFName, rspMsg->dbFName); strcpy(output->tbName, rspMsg->tbName); output->dbId = rspMsg->dbId; - + SET_META_TYPE_TABLE(output->metaType); - + CTG_ERR_JRET(queryCreateTableMetaFromMsg(rspMsg, true, &output->tbMeta)); - CTG_ERR_JRET(ctgPushUpdateTblMsgInQueue(pCtg, output, false)); + CTG_ERR_JRET(ctgUpdateTbMetaEnqueue(pCtg, output, false)); CTG_API_LEAVE(code); - + _return: taosMemoryFreeClear(output->tbMeta); taosMemoryFreeClear(output); - + CTG_API_LEAVE(code); } -int32_t ctgGetTbSverFromCache(SCatalog *pCtg, const SName *pTableName, int32_t *sver, int32_t *tbType, uint64_t *suid, - char *stbName) { - *sver = -1; - - if (NULL == pCtg->dbCache) { - ctgDebug("empty tbmeta cache, tbName:%s", pTableName->tname); - return TSDB_CODE_SUCCESS; - } - - 
SCtgDBCache *dbCache = NULL; - char dbFName[TSDB_DB_FNAME_LEN] = {0}; - tNameGetFullDbName(pTableName, dbFName); - - ctgAcquireDBCache(pCtg, dbFName, &dbCache); - if (NULL == dbCache) { - ctgDebug("db %s not in cache", pTableName->tname); - return TSDB_CODE_SUCCESS; - } - - CTG_LOCK(CTG_READ, &dbCache->tbCache.metaLock); - STableMeta *tbMeta = taosHashGet(dbCache->tbCache.metaCache, pTableName->tname, strlen(pTableName->tname)); - if (tbMeta) { - *tbType = tbMeta->tableType; - *suid = tbMeta->suid; - if (*tbType != TSDB_CHILD_TABLE) { - *sver = tbMeta->sversion; - } - } - CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock); - - if (NULL == tbMeta) { - ctgReleaseDBCache(pCtg, dbCache); - return TSDB_CODE_SUCCESS; - } - - if (*tbType != TSDB_CHILD_TABLE) { - ctgReleaseDBCache(pCtg, dbCache); - ctgDebug("Got sver %d from cache, type:%d, dbFName:%s, tbName:%s", *sver, *tbType, dbFName, pTableName->tname); - - return TSDB_CODE_SUCCESS; - } - - ctgDebug("Got subtable meta from cache, dbFName:%s, tbName:%s, suid:%" PRIx64, dbFName, pTableName->tname, *suid); - - CTG_LOCK(CTG_READ, &dbCache->tbCache.stbLock); - - STableMeta **stbMeta = taosHashGet(dbCache->tbCache.stbCache, suid, sizeof(*suid)); - if (NULL == stbMeta || NULL == *stbMeta) { - CTG_UNLOCK(CTG_READ, &dbCache->tbCache.stbLock); - ctgReleaseDBCache(pCtg, dbCache); - ctgDebug("stb not in stbCache, suid:%" PRIx64, *suid); - return TSDB_CODE_SUCCESS; - } - - if ((*stbMeta)->suid != *suid) { - CTG_UNLOCK(CTG_READ, &dbCache->tbCache.stbLock); - ctgReleaseDBCache(pCtg, dbCache); - ctgError("stable suid in stbCache mis-match, expected suid:%" PRIx64 ",actual suid:%" PRIx64, *suid, - (*stbMeta)->suid); - CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR); - } - - size_t nameLen = 0; - char *name = taosHashGetKey(*stbMeta, &nameLen); - - strncpy(stbName, name, nameLen); - stbName[nameLen] = 0; - - *sver = (*stbMeta)->sversion; - - CTG_UNLOCK(CTG_READ, &dbCache->tbCache.stbLock); - - ctgReleaseDBCache(pCtg, dbCache); - - ctgDebug("Got sver %d from cache, type:%d, dbFName:%s, tbName:%s", *sver, *tbType, dbFName, pTableName->tname); - - return TSDB_CODE_SUCCESS; -} - -int32_t catalogChkTbMetaVersion(SCatalog *pCtg, void *pTrans, const SEpSet *pMgmtEps, SArray *pTables) { +int32_t catalogChkTbMetaVersion(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, SArray* pTables) { CTG_API_ENTER(); if (NULL == pCtg || NULL == pTrans || NULL == pMgmtEps || NULL == pTables) { CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT); } - SName name; + SName name; int32_t sver = 0; + int32_t tver = 0; int32_t tbNum = taosArrayGetSize(pTables); for (int32_t i = 0; i < tbNum; ++i) { STbSVersion* pTb = (STbSVersion*)taosArrayGet(pTables, i); @@ -2974,8 +841,8 @@ int32_t catalogChkTbMetaVersion(SCatalog *pCtg, void *pTrans, const SEpSet *pMgm int32_t tbType = 0; uint64_t suid = 0; char stbName[TSDB_TABLE_FNAME_LEN]; - ctgGetTbSverFromCache(pCtg, &name, &sver, &tbType, &suid, stbName); - if (sver >= 0 && sver < pTb->sver) { + ctgReadTbVerFromCache(pCtg, &name, &sver, &tver, &tbType, &suid, stbName); + if ((sver >= 0 && sver < pTb->sver) || (tver >= 0 && tver < pTb->tver)) { switch (tbType) { case TSDB_CHILD_TABLE: { SName stb = name; @@ -2997,7 +864,8 @@ int32_t catalogChkTbMetaVersion(SCatalog *pCtg, void *pTrans, const SEpSet *pMgm CTG_API_LEAVE(TSDB_CODE_SUCCESS); } -int32_t catalogRefreshDBVgInfo(SCatalog *pCtg, void *pTrans, const SEpSet *pMgmtEps, const char *dbFName) { + +int32_t catalogRefreshDBVgInfo(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const char* dbFName) { 
   CTG_API_ENTER();
 
   if (NULL == pCtg || NULL == pTrans || NULL == pMgmtEps || NULL == dbFName) {
@@ -3007,31 +875,34 @@ int32_t catalogRefreshDBVgInfo(SCatalog *pCtg, void *pTrans, const SEpSet *pMgmt
   CTG_API_LEAVE(ctgRefreshDBVgInfo(pCtg, pTrans, pMgmtEps, dbFName));
 }
 
-int32_t catalogRefreshTableMeta(SCatalog *pCtg, void *pTrans, const SEpSet *pMgmtEps, const SName *pTableName,
-                                int32_t isSTable) {
+int32_t catalogRefreshTableMeta(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const SName* pTableName, int32_t isSTable) {
   CTG_API_ENTER();
 
   if (NULL == pCtg || NULL == pTrans || NULL == pMgmtEps || NULL == pTableName) {
     CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
   }
 
-  CTG_API_LEAVE(ctgRefreshTblMeta(pCtg, pTrans, pMgmtEps, pTableName,
-                                  CTG_FLAG_FORCE_UPDATE | CTG_FLAG_MAKE_STB(isSTable), NULL, true));
+  SCtgTbMetaCtx ctx = {0};
+  ctx.pName = (SName*)pTableName;
+  ctx.flag = CTG_FLAG_FORCE_UPDATE | CTG_FLAG_MAKE_STB(isSTable);
+  
+  CTG_API_LEAVE(ctgRefreshTbMeta(CTG_PARAMS_LIST(), &ctx, NULL, true));
 }
 
-int32_t catalogRefreshGetTableMeta(SCatalog *pCtg, void *pTrans, const SEpSet *pMgmtEps, const SName *pTableName,
-                                   STableMeta **pTableMeta, int32_t isSTable) {
+int32_t catalogRefreshGetTableMeta(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const SName* pTableName, STableMeta** pTableMeta, int32_t isSTable) {
   CTG_API_ENTER();
 
-  CTG_API_LEAVE(ctgGetTableMeta(pCtg, pTrans, pMgmtEps, pTableName, pTableMeta,
-                                CTG_FLAG_FORCE_UPDATE | CTG_FLAG_MAKE_STB(isSTable)));
+  SCtgTbMetaCtx ctx = {0};
+  ctx.pName = (SName*)pTableName;
+  ctx.flag = CTG_FLAG_FORCE_UPDATE | CTG_FLAG_MAKE_STB(isSTable);
+  
+  CTG_API_LEAVE(ctgGetTbMeta(CTG_PARAMS_LIST(), &ctx, pTableMeta));
 }
 
-int32_t catalogGetTableDistVgInfo(SCatalog *pCtg, void *pRpc, const SEpSet *pMgmtEps, const SName *pTableName,
-                                  SArray **pVgList) {
+int32_t catalogGetTableDistVgInfo(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const SName* pTableName, SArray** pVgList) {
   CTG_API_ENTER();
 
-  if (NULL == pCtg || NULL == pRpc || NULL == pMgmtEps || NULL == pTableName || NULL == pVgList) {
+  if (NULL == pCtg || NULL == pTrans || NULL == pMgmtEps || NULL == pTableName || NULL == pVgList) {
     CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
   }
 
@@ -3040,33 +911,11 @@ int32_t catalogGetTableDistVgInfo(SCatalog *pCtg, void *pRpc, const SEpSet *pMgm
     CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
   }
 
-  int32_t code = 0;
-
-  while (true) {
-    code = ctgGetTableDistVgInfo(pCtg, pRpc, pMgmtEps, pTableName, pVgList);
-    if (code) {
-      if (TSDB_CODE_CTG_VG_META_MISMATCH == code) {
-        CTG_ERR_JRET(ctgRefreshTblMeta(pCtg, pRpc, pMgmtEps, pTableName,
-                                       CTG_FLAG_FORCE_UPDATE | CTG_FLAG_MAKE_STB(CTG_FLAG_UNKNOWN_STB), NULL, true));
-
-        char dbFName[TSDB_DB_FNAME_LEN] = {0};
-        tNameGetFullDbName(pTableName, dbFName);
-        CTG_ERR_JRET(ctgRefreshDBVgInfo(pCtg, pRpc, pMgmtEps, dbFName));
-
-        continue;
-      }
-    }
-
-    break;
-  }
-
-_return:
-
-  CTG_API_LEAVE(code);
+  CTG_API_LEAVE(ctgGetTbDistVgInfo(pCtg, pTrans, pMgmtEps, (SName*)pTableName, pVgList));
 }
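Note on the new calling convention: every table-meta entry point above now packs its arguments into an SCtgTbMetaCtx and delegates to ctgGetTbMeta/ctgRefreshTbMeta instead of passing a bare flag parameter. A minimal sketch of the pattern these wrappers follow (error handling elided; the names are the ones introduced by this diff):

  SCtgTbMetaCtx ctx = {0};
  ctx.pName = (SName*)pTableName;   // table to resolve
  ctx.flag = CTG_FLAG_UNKNOWN_STB;  // let the catalog discover whether it is a super table
  STableMeta *pMeta = NULL;
  CTG_ERR_RET(ctgGetTbMeta(CTG_PARAMS_LIST(), &ctx, &pMeta));  // cache first, then mnode/vnode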
 
-int32_t catalogGetTableHashVgroup(SCatalog *pCtg, void *pTrans, const SEpSet *pMgmtEps, const SName *pTableName,
-                                  SVgroupInfo *pVgroup) {
+
+int32_t catalogGetTableHashVgroup(SCatalog *pCtg, void *pTrans, const SEpSet *pMgmtEps, const SName *pTableName, SVgroupInfo *pVgroup) {
   CTG_API_ENTER();
 
   if (CTG_IS_SYS_DBNAME(pTableName->dbname)) {
@@ -3074,9 +923,9 @@ int32_t catalogGetTableHashVgroup(SCatalog *pCtg, void *pTrans, const SEpSet *pM
     CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
   }
 
-  SCtgDBCache *dbCache = NULL;
-  int32_t      code = 0;
-  char         db[TSDB_DB_FNAME_LEN] = {0};
+  SCtgDBCache* dbCache = NULL;
+  int32_t code = 0;
+  char db[TSDB_DB_FNAME_LEN] = {0};
   tNameGetFullDbName(pTableName, db);
 
   SDBVgInfo *vgInfo = NULL;
@@ -3099,8 +948,8 @@ _return:
 
   CTG_API_LEAVE(code);
 }
 
-int32_t catalogGetAllMeta(SCatalog *pCtg, void *pTrans, const SEpSet *pMgmtEps, const SCatalogReq *pReq,
-                          SMetaData *pRsp) {
+
+int32_t catalogGetAllMeta(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const SCatalogReq* pReq, SMetaData* pRsp) {
   CTG_API_ENTER();
 
   if (NULL == pCtg || NULL == pTrans || NULL == pMgmtEps || NULL == pReq || NULL == pRsp) {
@@ -3110,8 +959,8 @@ int32_t catalogGetAllMeta(SCatalog *pCtg, void *pTrans, const SEpSet *pMgmtEps,
   int32_t code = 0;
   pRsp->pTableMeta = NULL;
 
-  if (pReq->pTableName) {
-    int32_t tbNum = (int32_t)taosArrayGetSize(pReq->pTableName);
+  if (pReq->pTableMeta) {
+    int32_t tbNum = (int32_t)taosArrayGetSize(pReq->pTableMeta);
     if (tbNum <= 0) {
       ctgError("empty table name list, tbNum:%d", tbNum);
       CTG_ERR_JRET(TSDB_CODE_CTG_INVALID_INPUT);
@@ -3122,12 +971,15 @@ int32_t catalogGetAllMeta(SCatalog *pCtg, void *pTrans, const SEpSet *pMgmtEps,
       ctgError("taosArrayInit %d failed", tbNum);
       CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR);
     }
-
+    
     for (int32_t i = 0; i < tbNum; ++i) {
-      SName      *name = taosArrayGet(pReq->pTableName, i);
+      SName *name = taosArrayGet(pReq->pTableMeta, i);
       STableMeta *pTableMeta = NULL;
-
-      CTG_ERR_JRET(ctgGetTableMeta(pCtg, pTrans, pMgmtEps, name, &pTableMeta, CTG_FLAG_UNKNOWN_STB));
+      SCtgTbMetaCtx ctx = {0};
+      ctx.pName = name;
+      ctx.flag = CTG_FLAG_UNKNOWN_STB;
+      
+      CTG_ERR_JRET(ctgGetTbMeta(CTG_PARAMS_LIST(), &ctx, &pTableMeta));
 
       if (NULL == taosArrayPush(pRsp->pTableMeta, &pTableMeta)) {
         ctgError("taosArrayPush failed, idx:%d", i);
@@ -3139,12 +991,12 @@ int32_t catalogGetAllMeta(SCatalog *pCtg, void *pTrans, const SEpSet *pMgmtEps,
 
   if (pReq->qNodeRequired) {
     pRsp->pQnodeList = taosArrayInit(10, sizeof(SQueryNodeAddr));
-    CTG_ERR_JRET(ctgGetQnodeListFromMnode(pCtg, pTrans, pMgmtEps, pRsp->pQnodeList));
+    CTG_ERR_JRET(ctgGetQnodeListFromMnode(CTG_PARAMS_LIST(), pRsp->pQnodeList, NULL));
   }
 
   CTG_API_LEAVE(TSDB_CODE_SUCCESS);
 
-_return:
+_return:  
 
   if (pRsp->pTableMeta) {
     int32_t aSize = taosArrayGetSize(pRsp->pTableMeta);
@@ -3152,30 +1004,58 @@ _return:
       STableMeta *pMeta = taosArrayGetP(pRsp->pTableMeta, i);
       taosMemoryFreeClear(pMeta);
     }
-
+    
     taosArrayDestroy(pRsp->pTableMeta);
     pRsp->pTableMeta = NULL;
   }
-
+  
   CTG_API_LEAVE(code);
 }
 
-int32_t catalogGetQnodeList(SCatalog *pCtg, void *pRpc, const SEpSet *pMgmtEps, SArray *pQnodeList) {
+int32_t catalogAsyncGetAllMeta(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, uint64_t reqId, const SCatalogReq* pReq, catalogCallback fp, void* param, int64_t* jobId) {
   CTG_API_ENTER();
 
+  if (NULL == pCtg || NULL == pTrans || NULL == pMgmtEps || NULL == pReq || NULL == fp || NULL == param) {
+    CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
+  }
+  
+  int32_t code = 0;
+  SCtgJob *pJob = NULL;
+  CTG_ERR_JRET(ctgInitJob(CTG_PARAMS_LIST(), &pJob, reqId, pReq, fp, param));
+  
+  CTG_ERR_JRET(ctgLaunchJob(pJob));
+  
+  *jobId = pJob->refId;
+  
+_return:
+  
+  if (pJob) {
+    taosReleaseRef(gCtgMgmt.jobPool, pJob->refId);
+    
+    if (code) {
+      taosRemoveRef(gCtgMgmt.jobPool, pJob->refId);
+    }
+  }
+  
+  CTG_API_LEAVE(code);
+}
+
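catalogAsyncGetAllMeta is the new non-blocking entry point: it builds a ref-counted SCtgJob, launches all of its tasks, and returns a job id immediately. A hedged caller-side sketch; the callback signature is inferred from how userFp is invoked in ctgHandleTaskEnd further down, and onMetaReady/myParam are hypothetical names used only for illustration:

  // hypothetical caller of the new async API
  void onMetaReady(SMetaData *pRsp, void *param, int32_t code) {
    if (code) {
      return;  // the whole job failed with this code
    }
    // pRsp->pTableMeta, pRsp->pDbVgroup, ... hold the per-task results
  }

  int64_t jobId = 0;
  SCatalogReq req = {0};  // fill pTableMeta/pDbVgroup/... with the names to resolve
  CTG_ERR_RET(catalogAsyncGetAllMeta(pCtg, pTrans, pMgmtEps, reqId, &req, onMetaReady, myParam, &jobId));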
+int32_t catalogGetQnodeList(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, SArray* pQnodeList) {
+  CTG_API_ENTER();
+
   int32_t code = 0;
-  if (NULL == pCtg || NULL == pRpc || NULL == pMgmtEps || NULL == pQnodeList) {
+  if (NULL == pCtg || NULL == pTrans || NULL == pMgmtEps || NULL == pQnodeList) {
     CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
   }
 
-  CTG_ERR_JRET(ctgGetQnodeListFromMnode(pCtg, pRpc, pMgmtEps, pQnodeList));
+  CTG_ERR_JRET(ctgGetQnodeListFromMnode(CTG_PARAMS_LIST(), pQnodeList, NULL));
 
 _return:
 
   CTG_API_LEAVE(TSDB_CODE_SUCCESS);
 }
 
-int32_t catalogGetExpiredSTables(SCatalog *pCtg, SSTableMetaVersion **stables, uint32_t *num) {
+int32_t catalogGetExpiredSTables(SCatalog* pCtg, SSTableMetaVersion **stables, uint32_t *num) {
   CTG_API_ENTER();
 
   if (NULL == pCtg || NULL == stables || NULL == num) {
@@ -3185,9 +1065,9 @@ int32_t catalogGetExpiredSTables(SCatalog *pCtg, SSTableMetaVersion **stables, u
   CTG_API_LEAVE(ctgMetaRentGet(&pCtg->stbRent, (void **)stables, num, sizeof(SSTableMetaVersion)));
 }
 
-int32_t catalogGetExpiredDBs(SCatalog *pCtg, SDbVgVersion **dbs, uint32_t *num) {
+int32_t catalogGetExpiredDBs(SCatalog* pCtg, SDbVgVersion **dbs, uint32_t *num) {
   CTG_API_ENTER();
-
+  
   if (NULL == pCtg || NULL == dbs || NULL == num) {
     CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
   }
@@ -3195,9 +1075,9 @@ int32_t catalogGetExpiredDBs(SCatalog *pCtg, SDbVgVersion **dbs, uint32_t *num)
   CTG_API_LEAVE(ctgMetaRentGet(&pCtg->dbRent, (void **)dbs, num, sizeof(SDbVgVersion)));
 }
 
-int32_t catalogGetExpiredUsers(SCatalog *pCtg, SUserAuthVersion **users, uint32_t *num) {
+int32_t catalogGetExpiredUsers(SCatalog* pCtg, SUserAuthVersion **users, uint32_t *num) {
   CTG_API_ENTER();
-
+  
   if (NULL == pCtg || NULL == users || NULL == num) {
     CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
   }
@@ -3211,102 +1091,96 @@ int32_t catalogGetExpiredUsers(SCatalog *pCtg, SUserAuthVersion **users, uint32_
     }
   }
 
-  uint32_t      i = 0;
+  uint32_t i = 0;
   SCtgUserAuth *pAuth = taosHashIterate(pCtg->userCache, NULL);
   while (pAuth != NULL) {
-    void *key = taosHashGetKey(pAuth, NULL);
-    strncpy((*users)[i].user, key, sizeof((*users)[i].user));
+    size_t len = 0;
+    void *key = taosHashGetKey(pAuth, &len);
+    strncpy((*users)[i].user, key, len);
+    (*users)[i].user[len] = 0;
     (*users)[i].version = pAuth->version;
+    ++i;
     pAuth = taosHashIterate(pCtg->userCache, pAuth);
   }
 
   CTG_API_LEAVE(TSDB_CODE_SUCCESS);
 }
 
-int32_t catalogGetDBCfg(SCatalog *pCtg, void *pRpc, const SEpSet *pMgmtEps, const char *dbFName, SDbCfgInfo *pDbCfg) {
-  CTG_API_ENTER();
 
-  if (NULL == pCtg || NULL == pRpc || NULL == pMgmtEps || NULL == dbFName || NULL == pDbCfg) {
+int32_t catalogGetDBCfg(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const char* dbFName, SDbCfgInfo* pDbCfg) {
+  CTG_API_ENTER();
+  
+  if (NULL == pCtg || NULL == pTrans || NULL == pMgmtEps || NULL == dbFName || NULL == pDbCfg) {
     CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
   }
 
-  CTG_API_LEAVE(ctgGetDBCfgFromMnode(pCtg, pRpc, pMgmtEps, dbFName, pDbCfg));
+  CTG_API_LEAVE(ctgGetDBCfgFromMnode(CTG_PARAMS_LIST(), dbFName, pDbCfg, NULL));
 }
 
-int32_t catalogGetIndexInfo(SCatalog *pCtg, void *pRpc, const SEpSet *pMgmtEps, const char *indexName,
-                            SIndexInfo *pInfo) {
+int32_t catalogGetIndexMeta(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const char* indexName, SIndexInfo* pInfo) {
   CTG_API_ENTER();
-
-  if (NULL == pCtg || NULL == pRpc || NULL == pMgmtEps || NULL == indexName || NULL == pInfo) {
+  
+  if (NULL == pCtg || NULL == pTrans || NULL == pMgmtEps || NULL == indexName || NULL == pInfo) {
     CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
   }
 
-  CTG_API_LEAVE(ctgGetIndexInfoFromMnode(pCtg, pRpc, pMgmtEps, indexName, pInfo));
+  CTG_API_LEAVE(ctgGetIndexInfoFromMnode(CTG_PARAMS_LIST(), indexName, pInfo, NULL));
 }
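The catalogGetExpiredUsers change above fixes two latent bugs in the user-cache iteration: the key returned by taosHashGetKey is not NUL-terminated, so it must be copied by its actual length rather than by the destination size, and the slot index i was never incremented, so every user overwrote slot 0. The corrected idiom, as now used:

  size_t len = 0;
  void *key = taosHashGetKey(pAuth, &len);  // key bytes are NOT NUL-terminated
  strncpy((*users)[i].user, key, len);
  (*users)[i].user[len] = 0;                // terminate explicitly
  ++i;                                      // advance to the next output slot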
-int32_t catalogGetUdfInfo(SCatalog *pCtg, void *pRpc, const SEpSet *pMgmtEps, const char *funcName, SFuncInfo **pInfo) {
+int32_t catalogGetUdfInfo(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const char* funcName, SFuncInfo* pInfo) {
   CTG_API_ENTER();
-
-  if (NULL == pCtg || NULL == pRpc || NULL == pMgmtEps || NULL == funcName || NULL == pInfo) {
+  
+  if (NULL == pCtg || NULL == pTrans || NULL == pMgmtEps || NULL == funcName || NULL == pInfo) {
     CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
   }
 
   int32_t code = 0;
-  *pInfo = taosMemoryMalloc(sizeof(SFuncInfo));
-  if (NULL == *pInfo) {
-    CTG_API_LEAVE(TSDB_CODE_OUT_OF_MEMORY);
-  }
-
-  CTG_ERR_JRET(ctgGetUdfInfoFromMnode(pCtg, pRpc, pMgmtEps, funcName, pInfo));
-
+  CTG_ERR_JRET(ctgGetUdfInfoFromMnode(CTG_PARAMS_LIST(), funcName, pInfo, NULL));
+  
 _return:
-
-  if (code) {
-    taosMemoryFreeClear(*pInfo);
-  }
-
+  
   CTG_API_LEAVE(code);
 }
 
-int32_t catalogChkAuth(SCatalog *pCtg, void *pRpc, const SEpSet *pMgmtEps, const char *user, const char *dbFName,
-                       AUTH_TYPE type, bool *pass) {
+int32_t catalogChkAuth(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const char* user, const char* dbFName, AUTH_TYPE type, bool *pass) {
   CTG_API_ENTER();
-
-  if (NULL == pCtg || NULL == pRpc || NULL == pMgmtEps || NULL == user || NULL == dbFName || NULL == pass) {
+  
+  if (NULL == pCtg || NULL == pTrans || NULL == pMgmtEps || NULL == user || NULL == dbFName || NULL == pass) {
     CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
   }
 
   int32_t code = 0;
-  CTG_ERR_JRET(ctgChkAuth(pCtg, pRpc, pMgmtEps, user, dbFName, type, pass));
-
+  CTG_ERR_JRET(ctgChkAuth(CTG_PARAMS_LIST(), user, dbFName, type, pass));
+  
 _return:
 
   CTG_API_LEAVE(code);
 }
 
-int32_t catalogUpdateUserAuthInfo(SCatalog *pCtg, SGetUserAuthRsp *pAuth) {
+int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth) {
   CTG_API_ENTER();
 
   if (NULL == pCtg || NULL == pAuth) {
     CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
   }
 
-  CTG_API_LEAVE(ctgPushUpdateUserMsgInQueue(pCtg, pAuth, false));
+  CTG_API_LEAVE(ctgUpdateUserEnqueue(pCtg, pAuth, false));
 }
 
+
 void catalogDestroy(void) {
   qInfo("start to destroy catalog");
-
-  if (NULL == gCtgMgmt.pCluster || atomic_load_8((int8_t *)&gCtgMgmt.exit)) {
+  
+  if (NULL == gCtgMgmt.pCluster || atomic_load_8((int8_t*)&gCtgMgmt.exit)) {
     return;
   }
 
-  atomic_store_8((int8_t *)&gCtgMgmt.exit, true);
+  atomic_store_8((int8_t*)&gCtgMgmt.exit, true);
 
   if (tsem_post(&gCtgMgmt.queue.reqSem)) {
     qError("tsem_post failed, error:%s", tstrerror(TAOS_SYSTEM_ERROR(errno)));
   }
-
+  
   if (tsem_post(&gCtgMgmt.queue.rspSem)) {
     qError("tsem_post failed, error:%s", tstrerror(TAOS_SYSTEM_ERROR(errno)));
   }
@@ -3314,26 +1188,28 @@ void catalogDestroy(void) {
   while (CTG_IS_LOCKED(&gCtgMgmt.lock)) {
     taosUsleep(1);
   }
-
+  
   CTG_LOCK(CTG_WRITE, &gCtgMgmt.lock);
 
   SCatalog *pCtg = NULL;
-  void     *pIter = taosHashIterate(gCtgMgmt.pCluster, NULL);
+  void *pIter = taosHashIterate(gCtgMgmt.pCluster, NULL);
   while (pIter) {
     pCtg = *(SCatalog **)pIter;
 
     if (pCtg) {
       catalogFreeHandle(pCtg);
     }
-
+    
     pIter = taosHashIterate(gCtgMgmt.pCluster, pIter);
   }
-
+  
   taosHashCleanup(gCtgMgmt.pCluster);
   gCtgMgmt.pCluster = NULL;
 
-  CTG_UNLOCK(CTG_WRITE, &gCtgMgmt.lock);
+  if (CTG_IS_LOCKED(&gCtgMgmt.lock) == TD_RWLATCH_WRITE_FLAG_COPY) CTG_UNLOCK(CTG_WRITE, &gCtgMgmt.lock);
 
   qInfo("catalog destroyed");
 }
+
+
diff --git a/source/libs/catalog/src/catalogDbg.c b/source/libs/catalog/src/catalogDbg.c
deleted file mode 100644
index 1d4ad0082c7e0736dc2ccad54609319e29e426f7..0000000000000000000000000000000000000000
--- a/source/libs/catalog/src/catalogDbg.c
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Copyright (c) 2019 
TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#include "trpc.h" -#include "query.h" -#include "tname.h" -#include "catalogInt.h" - -extern SCatalogMgmt gCtgMgmt; -SCtgDebug gCTGDebug = {0}; - -int32_t ctgdEnableDebug(char *option) { - if (0 == strcasecmp(option, "lock")) { - gCTGDebug.lockEnable = true; - qDebug("lock debug enabled"); - return TSDB_CODE_SUCCESS; - } - - if (0 == strcasecmp(option, "cache")) { - gCTGDebug.cacheEnable = true; - qDebug("cache debug enabled"); - return TSDB_CODE_SUCCESS; - } - - if (0 == strcasecmp(option, "api")) { - gCTGDebug.apiEnable = true; - qDebug("api debug enabled"); - return TSDB_CODE_SUCCESS; - } - - if (0 == strcasecmp(option, "meta")) { - gCTGDebug.metaEnable = true; - qDebug("api debug enabled"); - return TSDB_CODE_SUCCESS; - } - - qError("invalid debug option:%s", option); - - return TSDB_CODE_CTG_INTERNAL_ERROR; -} - -int32_t ctgdGetStatNum(char *option, void *res) { - if (0 == strcasecmp(option, "runtime.qDoneNum")) { - *(uint64_t *)res = atomic_load_64(&gCtgMgmt.stat.runtime.qDoneNum); - return TSDB_CODE_SUCCESS; - } - - qError("invalid stat option:%s", option); - - return TSDB_CODE_CTG_INTERNAL_ERROR; -} - -int32_t ctgdGetTbMetaNum(SCtgDBCache *dbCache) { - return dbCache->tbCache.metaCache ? (int32_t)taosHashGetSize(dbCache->tbCache.metaCache) : 0; -} - -int32_t ctgdGetStbNum(SCtgDBCache *dbCache) { - return dbCache->tbCache.stbCache ? 
(int32_t)taosHashGetSize(dbCache->tbCache.stbCache) : 0; -} - -int32_t ctgdGetRentNum(SCtgRentMgmt *rent) { - int32_t num = 0; - for (uint16_t i = 0; i < rent->slotNum; ++i) { - SCtgRentSlot *slot = &rent->slots[i]; - if (NULL == slot->meta) { - continue; - } - - num += taosArrayGetSize(slot->meta); - } - - return num; -} - -int32_t ctgdGetClusterCacheNum(SCatalog* pCtg, int32_t type) { - if (NULL == pCtg || NULL == pCtg->dbCache) { - return 0; - } - - switch (type) { - case CTG_DBG_DB_NUM: - return (int32_t)taosHashGetSize(pCtg->dbCache); - case CTG_DBG_DB_RENT_NUM: - return ctgdGetRentNum(&pCtg->dbRent); - case CTG_DBG_STB_RENT_NUM: - return ctgdGetRentNum(&pCtg->stbRent); - default: - break; - } - - SCtgDBCache *dbCache = NULL; - int32_t num = 0; - void *pIter = taosHashIterate(pCtg->dbCache, NULL); - while (pIter) { - dbCache = (SCtgDBCache *)pIter; - switch (type) { - case CTG_DBG_META_NUM: - num += ctgdGetTbMetaNum(dbCache); - break; - case CTG_DBG_STB_NUM: - num += ctgdGetStbNum(dbCache); - break; - default: - ctgError("invalid type:%d", type); - break; - } - pIter = taosHashIterate(pCtg->dbCache, pIter); - } - - return num; -} - -void ctgdShowTableMeta(SCatalog* pCtg, const char *tbName, STableMeta* p) { - if (!gCTGDebug.metaEnable) { - return; - } - - STableComInfo *c = &p->tableInfo; - - if (TSDB_CHILD_TABLE == p->tableType) { - ctgDebug("table [%s] meta: type:%d, vgId:%d, uid:%" PRIx64 ",suid:%" PRIx64, tbName, p->tableType, p->vgId, p->uid, p->suid); - return; - } else { - ctgDebug("table [%s] meta: type:%d, vgId:%d, uid:%" PRIx64 ",suid:%" PRIx64 ",sv:%d, tv:%d, tagNum:%d, precision:%d, colNum:%d, rowSize:%d", - tbName, p->tableType, p->vgId, p->uid, p->suid, p->sversion, p->tversion, c->numOfTags, c->precision, c->numOfColumns, c->rowSize); - } - - int32_t colNum = c->numOfColumns + c->numOfTags; - for (int32_t i = 0; i < colNum; ++i) { - SSchema *s = &p->schema[i]; - ctgDebug("[%d] name:%s, type:%d, colId:%d, bytes:%d", i, s->name, s->type, s->colId, s->bytes); - } -} - -void ctgdShowDBCache(SCatalog* pCtg, SHashObj *dbHash) { - if (NULL == dbHash || !gCTGDebug.cacheEnable) { - return; - } - - int32_t i = 0; - SCtgDBCache *dbCache = NULL; - void *pIter = taosHashIterate(dbHash, NULL); - while (pIter) { - char *dbFName = NULL; - size_t len = 0; - - dbCache = (SCtgDBCache *)pIter; - - dbFName = taosHashGetKey(pIter, &len); - - int32_t metaNum = dbCache->tbCache.metaCache ? taosHashGetSize(dbCache->tbCache.metaCache) : 0; - int32_t stbNum = dbCache->tbCache.stbCache ? 
taosHashGetSize(dbCache->tbCache.stbCache) : 0; - int32_t vgVersion = CTG_DEFAULT_INVALID_VERSION; - int32_t hashMethod = -1; - int32_t vgNum = 0; - - if (dbCache->vgInfo) { - vgVersion = dbCache->vgInfo->vgVersion; - hashMethod = dbCache->vgInfo->hashMethod; - if (dbCache->vgInfo->vgHash) { - vgNum = taosHashGetSize(dbCache->vgInfo->vgHash); - } - } - - ctgDebug("[%d] db [%.*s][%"PRIx64"] %s: metaNum:%d, stbNum:%d, vgVersion:%d, hashMethod:%d, vgNum:%d", - i, (int32_t)len, dbFName, dbCache->dbId, dbCache->deleted?"deleted":"", metaNum, stbNum, vgVersion, hashMethod, vgNum); - - pIter = taosHashIterate(dbHash, pIter); - } -} - - - - -void ctgdShowClusterCache(SCatalog* pCtg) { - if (!gCTGDebug.cacheEnable || NULL == pCtg) { - return; - } - - ctgDebug("## cluster %"PRIx64" %p cache Info BEGIN ##", pCtg->clusterId, pCtg); - ctgDebug("db:%d meta:%d stb:%d dbRent:%d stbRent:%d", ctgdGetClusterCacheNum(pCtg, CTG_DBG_DB_NUM), ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM), - ctgdGetClusterCacheNum(pCtg, CTG_DBG_STB_NUM), ctgdGetClusterCacheNum(pCtg, CTG_DBG_DB_RENT_NUM), ctgdGetClusterCacheNum(pCtg, CTG_DBG_STB_RENT_NUM)); - - ctgdShowDBCache(pCtg, pCtg->dbCache); - - ctgDebug("## cluster %"PRIx64" %p cache Info END ##", pCtg->clusterId, pCtg); -} - -int32_t ctgdShowCacheInfo(void) { - if (!gCTGDebug.cacheEnable) { - return TSDB_CODE_CTG_OUT_OF_SERVICE; - } - - CTG_API_ENTER(); - - SCatalog *pCtg = NULL; - void *pIter = taosHashIterate(gCtgMgmt.pCluster, NULL); - while (pIter) { - pCtg = *(SCatalog **)pIter; - - if (pCtg) { - ctgdShowClusterCache(pCtg); - } - - pIter = taosHashIterate(gCtgMgmt.pCluster, pIter); - } - - CTG_API_LEAVE(TSDB_CODE_SUCCESS); -} - diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c new file mode 100644 index 0000000000000000000000000000000000000000..eb84bf00a444fb6bc57652ee32abdf44035a0426 --- /dev/null +++ b/source/libs/catalog/src/ctgAsync.c @@ -0,0 +1,1115 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "trpc.h" +#include "query.h" +#include "tname.h" +#include "catalogInt.h" +#include "systable.h" +#include "tref.h" + +int32_t ctgInitGetTbMetaTask(SCtgJob *pJob, int32_t taskIdx, SName *name) { + SCtgTask task = {0}; + + task.type = CTG_TASK_GET_TB_META; + task.taskId = taskIdx; + task.pJob = pJob; + + task.taskCtx = taosMemoryCalloc(1, sizeof(SCtgTbMetaCtx)); + if (NULL == task.taskCtx) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + + SCtgTbMetaCtx* ctx = task.taskCtx; + ctx->pName = taosMemoryMalloc(sizeof(*name)); + if (NULL == ctx->pName) { + taosMemoryFree(task.taskCtx); + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + + memcpy(ctx->pName, name, sizeof(*name)); + ctx->flag = CTG_FLAG_UNKNOWN_STB; + + taosArrayPush(pJob->pTasks, &task); + + qDebug("QID:%" PRIx64 " task %d type %d initialized, tableName:%s", pJob->queryId, taskIdx, task.type, name->tname); + + return TSDB_CODE_SUCCESS; +} + +int32_t ctgInitGetDbVgTask(SCtgJob *pJob, int32_t taskIdx, char *dbFName) { + SCtgTask task = {0}; + + task.type = CTG_TASK_GET_DB_VGROUP; + task.taskId = taskIdx; + task.pJob = pJob; + + task.taskCtx = taosMemoryCalloc(1, sizeof(SCtgDbVgCtx)); + if (NULL == task.taskCtx) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + + SCtgDbVgCtx* ctx = task.taskCtx; + + memcpy(ctx->dbFName, dbFName, sizeof(ctx->dbFName)); + + taosArrayPush(pJob->pTasks, &task); + + qDebug("QID:%" PRIx64 " task %d type %d initialized, dbFName:%s", pJob->queryId, taskIdx, task.type, dbFName); + + return TSDB_CODE_SUCCESS; +} + +int32_t ctgInitGetDbCfgTask(SCtgJob *pJob, int32_t taskIdx, char *dbFName) { + SCtgTask task = {0}; + + task.type = CTG_TASK_GET_DB_CFG; + task.taskId = taskIdx; + task.pJob = pJob; + + task.taskCtx = taosMemoryCalloc(1, sizeof(SCtgDbCfgCtx)); + if (NULL == task.taskCtx) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + + SCtgDbCfgCtx* ctx = task.taskCtx; + + memcpy(ctx->dbFName, dbFName, sizeof(ctx->dbFName)); + + taosArrayPush(pJob->pTasks, &task); + + qDebug("QID:%" PRIx64 " task %d type %d initialized, dbFName:%s", pJob->queryId, taskIdx, task.type, dbFName); + + return TSDB_CODE_SUCCESS; +} + +int32_t ctgInitGetDbInfoTask(SCtgJob *pJob, int32_t taskIdx, char *dbFName) { + SCtgTask task = {0}; + + task.type = CTG_TASK_GET_DB_INFO; + task.taskId = taskIdx; + task.pJob = pJob; + + task.taskCtx = taosMemoryCalloc(1, sizeof(SCtgDbInfoCtx)); + if (NULL == task.taskCtx) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + + SCtgDbInfoCtx* ctx = task.taskCtx; + + memcpy(ctx->dbFName, dbFName, sizeof(ctx->dbFName)); + + taosArrayPush(pJob->pTasks, &task); + + qDebug("QID:%" PRIx64 " task %d type %d initialized, dbFName:%s", pJob->queryId, taskIdx, task.type, dbFName); + + return TSDB_CODE_SUCCESS; +} + + +int32_t ctgInitGetTbHashTask(SCtgJob *pJob, int32_t taskIdx, SName *name) { + SCtgTask task = {0}; + + task.type = CTG_TASK_GET_TB_HASH; + task.taskId = taskIdx; + task.pJob = pJob; + + task.taskCtx = taosMemoryCalloc(1, sizeof(SCtgTbHashCtx)); + if (NULL == task.taskCtx) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + + SCtgTbHashCtx* ctx = task.taskCtx; + ctx->pName = taosMemoryMalloc(sizeof(*name)); + if (NULL == ctx->pName) { + taosMemoryFree(task.taskCtx); + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + + memcpy(ctx->pName, name, sizeof(*name)); + tNameGetFullDbName(ctx->pName, ctx->dbFName); + + taosArrayPush(pJob->pTasks, &task); + + qDebug("QID:%" PRIx64 " task %d type %d initialized, tableName:%s", pJob->queryId, taskIdx, task.type, name->tname); + + return TSDB_CODE_SUCCESS; +} + +int32_t 
ctgInitGetQnodeTask(SCtgJob *pJob, int32_t taskIdx) {
+  SCtgTask task = {0};
+
+  task.type = CTG_TASK_GET_QNODE;
+  task.taskId = taskIdx;
+  task.pJob = pJob;
+  task.taskCtx = NULL;
+
+  taosArrayPush(pJob->pTasks, &task);
+
+  qDebug("QID:%" PRIx64 " task %d type %d initialized", pJob->queryId, taskIdx, task.type);
+
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t ctgInitGetIndexTask(SCtgJob *pJob, int32_t taskIdx, char *name) {
+  SCtgTask task = {0};
+
+  task.type = CTG_TASK_GET_INDEX;
+  task.taskId = taskIdx;
+  task.pJob = pJob;
+
+  task.taskCtx = taosMemoryCalloc(1, sizeof(SCtgIndexCtx));
+  if (NULL == task.taskCtx) {
+    CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+  }
+
+  SCtgIndexCtx* ctx = task.taskCtx;
+
+  strcpy(ctx->indexFName, name);
+
+  taosArrayPush(pJob->pTasks, &task);
+
+  qDebug("QID:%" PRIx64 " task %d type %d initialized, indexFName:%s", pJob->queryId, taskIdx, task.type, name);
+
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t ctgInitGetUdfTask(SCtgJob *pJob, int32_t taskIdx, char *name) {
+  SCtgTask task = {0};
+
+  task.type = CTG_TASK_GET_UDF;
+  task.taskId = taskIdx;
+  task.pJob = pJob;
+
+  task.taskCtx = taosMemoryCalloc(1, sizeof(SCtgUdfCtx));
+  if (NULL == task.taskCtx) {
+    CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+  }
+
+  SCtgUdfCtx* ctx = task.taskCtx;
+
+  strcpy(ctx->udfName, name);
+
+  taosArrayPush(pJob->pTasks, &task);
+
+  qDebug("QID:%" PRIx64 " task %d type %d initialized, udfName:%s", pJob->queryId, taskIdx, task.type, name);
+
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t ctgInitGetUserTask(SCtgJob *pJob, int32_t taskIdx, SUserAuthInfo *user) {
+  SCtgTask task = {0};
+
+  task.type = CTG_TASK_GET_USER;
+  task.taskId = taskIdx;
+  task.pJob = pJob;
+
+  task.taskCtx = taosMemoryCalloc(1, sizeof(SCtgUserCtx));
+  if (NULL == task.taskCtx) {
+    CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+  }
+
+  SCtgUserCtx* ctx = task.taskCtx;
+
+  memcpy(&ctx->user, user, sizeof(*user));
+
+  taosArrayPush(pJob->pTasks, &task);
+
+  qDebug("QID:%" PRIx64 " task %d type %d initialized, user:%s", pJob->queryId, taskIdx, task.type, user->user);
+
+  return TSDB_CODE_SUCCESS;
+}
+
+
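All of the ctgInitGetXxxTask helpers above share one shape: fill an SCtgTask, allocate a type-specific taskCtx, and push the task into pJob->pTasks. A condensed sketch of that recurring pattern (ctgInitTaskCommon is a hypothetical helper for illustration only, not part of this diff):

  static int32_t ctgInitTaskCommon(SCtgJob *pJob, int32_t taskIdx, int32_t type, size_t ctxSize, void **ppCtx) {
    SCtgTask task = {0};
    task.type = type;
    task.taskId = taskIdx;
    task.pJob = pJob;
    if (ctxSize > 0) {
      task.taskCtx = taosMemoryCalloc(1, ctxSize);
      if (NULL == task.taskCtx) {
        CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
      }
    }
    *ppCtx = task.taskCtx;               // caller fills in the type-specific fields
    taosArrayPush(pJob->pTasks, &task);  // the task array owns the ctx from here on
    return TSDB_CODE_SUCCESS;
  }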
+int32_t ctgInitJob(CTG_PARAMS, SCtgJob** job, uint64_t reqId, const SCatalogReq* pReq, catalogCallback fp, void* param) {
+  int32_t code = 0;
+  int32_t tbMetaNum = (int32_t)taosArrayGetSize(pReq->pTableMeta);
+  int32_t dbVgNum = (int32_t)taosArrayGetSize(pReq->pDbVgroup);
+  int32_t tbHashNum = (int32_t)taosArrayGetSize(pReq->pTableHash);
+  int32_t udfNum = (int32_t)taosArrayGetSize(pReq->pUdf);
+  int32_t qnodeNum = pReq->qNodeRequired ? 1 : 0;
+  int32_t dbCfgNum = (int32_t)taosArrayGetSize(pReq->pDbCfg);
+  int32_t indexNum = (int32_t)taosArrayGetSize(pReq->pIndex);
+  int32_t userNum = (int32_t)taosArrayGetSize(pReq->pUser);
+  int32_t dbInfoNum = (int32_t)taosArrayGetSize(pReq->pDbInfo);
+
+  int32_t taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dbCfgNum + indexNum + userNum + dbInfoNum;
+  if (taskNum <= 0) {
+    ctgError("empty input for job, taskNum:%d", taskNum);
+    CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT);
+  }
+
+  *job = taosMemoryCalloc(1, sizeof(SCtgJob));
+  if (NULL == *job) {
+    ctgError("calloc %d failed", (int32_t)sizeof(SCtgJob));
+    CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+  }
+
+  SCtgJob *pJob = *job;
+
+  pJob->queryId = reqId;
+  pJob->userFp = fp;
+  pJob->pCtg = pCtg;
+  pJob->pTrans = pTrans;
+  pJob->pMgmtEps = *pMgmtEps;
+  pJob->userParam = param;
+
+  pJob->tbMetaNum = tbMetaNum;
+  pJob->tbHashNum = tbHashNum;
+  pJob->qnodeNum = qnodeNum;
+  pJob->dbVgNum = dbVgNum;
+  pJob->udfNum = udfNum;
+  pJob->dbCfgNum = dbCfgNum;
+  pJob->indexNum = indexNum;
+  pJob->userNum = userNum;
+  pJob->dbInfoNum = dbInfoNum;
+
+  pJob->pTasks = taosArrayInit(taskNum, sizeof(SCtgTask));
+
+  if (NULL == pJob->pTasks) {
+    ctgError("taosArrayInit %d tasks failed", taskNum);
+    CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
+  }
+
+  int32_t taskIdx = 0;
+  for (int32_t i = 0; i < dbVgNum; ++i) {
+    char *dbFName = taosArrayGet(pReq->pDbVgroup, i);
+    CTG_ERR_JRET(ctgInitGetDbVgTask(pJob, taskIdx++, dbFName));
+  }
+
+  for (int32_t i = 0; i < dbCfgNum; ++i) {
+    char *dbFName = taosArrayGet(pReq->pDbCfg, i);
+    CTG_ERR_JRET(ctgInitGetDbCfgTask(pJob, taskIdx++, dbFName));
+  }
+
+  for (int32_t i = 0; i < dbInfoNum; ++i) {
+    char *dbFName = taosArrayGet(pReq->pDbInfo, i);
+    CTG_ERR_JRET(ctgInitGetDbInfoTask(pJob, taskIdx++, dbFName));
+  }
+
+  for (int32_t i = 0; i < tbMetaNum; ++i) {
+    SName *name = taosArrayGet(pReq->pTableMeta, i);
+    CTG_ERR_JRET(ctgInitGetTbMetaTask(pJob, taskIdx++, name));
+  }
+
+  for (int32_t i = 0; i < tbHashNum; ++i) {
+    SName *name = taosArrayGet(pReq->pTableHash, i);
+    CTG_ERR_JRET(ctgInitGetTbHashTask(pJob, taskIdx++, name));
+  }
+
+  for (int32_t i = 0; i < indexNum; ++i) {
+    char *indexName = taosArrayGet(pReq->pIndex, i);
+    CTG_ERR_JRET(ctgInitGetIndexTask(pJob, taskIdx++, indexName));
+  }
+
+  for (int32_t i = 0; i < udfNum; ++i) {
+    char *udfName = taosArrayGet(pReq->pUdf, i);
+    CTG_ERR_JRET(ctgInitGetUdfTask(pJob, taskIdx++, udfName));
+  }
+
+  for (int32_t i = 0; i < userNum; ++i) {
+    SUserAuthInfo *user = taosArrayGet(pReq->pUser, i);
+    CTG_ERR_JRET(ctgInitGetUserTask(pJob, taskIdx++, user));
+  }
+
+  if (qnodeNum) {
+    CTG_ERR_JRET(ctgInitGetQnodeTask(pJob, taskIdx++));
+  }
+
+  pJob->refId = taosAddRef(gCtgMgmt.jobPool, pJob);
+  if (pJob->refId < 0) {
+    ctgError("add job to ref failed, error: %s", tstrerror(terrno));
+    CTG_ERR_JRET(terrno);
+  }
+
+  taosAcquireRef(gCtgMgmt.jobPool, pJob->refId);
+
+  qDebug("QID:%" PRIx64 ", job %" PRIx64 " initialized, task num %d", pJob->queryId, pJob->refId, taskNum);
+
+  return TSDB_CODE_SUCCESS;
+
+_return:
+
+  taosMemoryFreeClear(*job);
+
+  CTG_RET(code);
+}
+
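ctgInitJob hands the finished job to a ref pool (gCtgMgmt.jobPool, opened with taosOpenRef(200, ctgFreeJob) in catalogInit earlier in this diff), so the job's lifetime is reference counted rather than owned by any single thread. The lifecycle as wired here, in sketch form:

  pJob->refId = taosAddRef(gCtgMgmt.jobPool, pJob);  // pool reference
  taosAcquireRef(gCtgMgmt.jobPool, pJob->refId);     // caller reference
  // ... tasks run asynchronously ...
  taosReleaseRef(gCtgMgmt.jobPool, pJob->refId);     // caller done (catalogAsyncGetAllMeta)
  taosRemoveRef(gCtgMgmt.jobPool, pJob->refId);      // pool reference dropped (ctgHandleTaskEnd)
  // once both references are gone, ctgFreeJob(pJob) runs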
+int32_t ctgDumpTbMetaRes(SCtgTask* pTask) {
+  SCtgJob* pJob = pTask->pJob;
+  if (NULL == pJob->jobRes.pTableMeta) {
+    pJob->jobRes.pTableMeta = taosArrayInit(pJob->tbMetaNum, POINTER_BYTES);
+    if (NULL == pJob->jobRes.pTableMeta) {
+      CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+    }
+  }
+
+  taosArrayPush(pJob->jobRes.pTableMeta, &pTask->res);
+
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t ctgDumpDbVgRes(SCtgTask* pTask) {
+  SCtgJob* pJob = pTask->pJob;
+  if (NULL == pJob->jobRes.pDbVgroup) {
+    pJob->jobRes.pDbVgroup = taosArrayInit(pJob->dbVgNum, POINTER_BYTES);
+    if (NULL == pJob->jobRes.pDbVgroup) {
+      CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+    }
+  }
+
+  taosArrayPush(pJob->jobRes.pDbVgroup, &pTask->res);
+  pTask->res = NULL;
+
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t ctgDumpTbHashRes(SCtgTask* pTask) {
+  SCtgJob* pJob = pTask->pJob;
+  if (NULL == pJob->jobRes.pTableHash) {
+    pJob->jobRes.pTableHash = taosArrayInit(pJob->tbHashNum, sizeof(SVgroupInfo));
+    if (NULL == pJob->jobRes.pTableHash) {
+      CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+    }
+  }
+
+  taosArrayPush(pJob->jobRes.pTableHash, pTask->res);
+
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t ctgDumpIndexRes(SCtgTask* pTask) {
+  SCtgJob* pJob = pTask->pJob;
+  if (NULL == pJob->jobRes.pIndex) {
+    pJob->jobRes.pIndex = taosArrayInit(pJob->indexNum, sizeof(SIndexInfo));
+    if (NULL == pJob->jobRes.pIndex) {
+      CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+    }
+  }
+
+  taosArrayPush(pJob->jobRes.pIndex, pTask->res);
+
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t ctgDumpQnodeRes(SCtgTask* pTask) {
+  SCtgJob* pJob = pTask->pJob;
+
+  TSWAP(pJob->jobRes.pQnodeList, pTask->res);
+
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t ctgDumpDbCfgRes(SCtgTask* pTask) {
+  SCtgJob* pJob = pTask->pJob;
+  if (NULL == pJob->jobRes.pDbCfg) {
+    pJob->jobRes.pDbCfg = taosArrayInit(pJob->dbCfgNum, sizeof(SDbCfgInfo));
+    if (NULL == pJob->jobRes.pDbCfg) {
+      CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+    }
+  }
+
+  taosArrayPush(pJob->jobRes.pDbCfg, pTask->res);
+
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t ctgDumpDbInfoRes(SCtgTask* pTask) {
+  SCtgJob* pJob = pTask->pJob;
+  if (NULL == pJob->jobRes.pDbInfo) {
+    pJob->jobRes.pDbInfo = taosArrayInit(pJob->dbInfoNum, sizeof(SDbInfo));
+    if (NULL == pJob->jobRes.pDbInfo) {
+      CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+    }
+  }
+
+  taosArrayPush(pJob->jobRes.pDbInfo, pTask->res);
+
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t ctgDumpUdfRes(SCtgTask* pTask) {
+  SCtgJob* pJob = pTask->pJob;
+  if (NULL == pJob->jobRes.pUdfList) {
+    pJob->jobRes.pUdfList = taosArrayInit(pJob->udfNum, sizeof(SFuncInfo));
+    if (NULL == pJob->jobRes.pUdfList) {
+      CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+    }
+  }
+
+  taosArrayPush(pJob->jobRes.pUdfList, pTask->res);
+
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t ctgDumpUserRes(SCtgTask* pTask) {
+  SCtgJob* pJob = pTask->pJob;
+  if (NULL == pJob->jobRes.pUser) {
+    pJob->jobRes.pUser = taosArrayInit(pJob->userNum, sizeof(bool));
+    if (NULL == pJob->jobRes.pUser) {
+      CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+    }
+  }
+
+  taosArrayPush(pJob->jobRes.pUser, pTask->res);
+
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t ctgHandleTaskEnd(SCtgTask* pTask, int32_t rspCode) {
+  SCtgJob* pJob = pTask->pJob;
+  int32_t code = 0;
+
+  qDebug("QID:%" PRIx64 " task %d end with rsp %s", pJob->queryId, pTask->taskId, tstrerror(rspCode));
+
+  if (rspCode) {
+    int32_t lastCode = atomic_val_compare_exchange_32(&pJob->rspCode, 0, rspCode);
+    if (0 == lastCode) {
+      CTG_ERR_JRET(rspCode);
+    }
+
+    return TSDB_CODE_SUCCESS;
+  }
+
+  int32_t taskDone = atomic_add_fetch_32(&pJob->taskDone, 1);
+  if (taskDone < taosArrayGetSize(pJob->pTasks)) {
+    qDebug("task done: %d, total: %d", taskDone, (int32_t)taosArrayGetSize(pJob->pTasks));
+    return TSDB_CODE_SUCCESS;
+  }
+
+  CTG_ERR_JRET(ctgMakeAsyncRes(pJob));
+
+_return:
+
+  qDebug("QID:%" PRIx64 " user callback with rsp %s", pJob->queryId, tstrerror(code));
+
+  (*pJob->userFp)(&pJob->jobRes, pJob->userParam, code);
+
+  taosRemoveRef(gCtgMgmt.jobPool, pJob->refId);
+
+  CTG_RET(code);
+}
+
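ctgHandleTaskEnd is the fan-in point for all async tasks: the first failure is parked in pJob->rspCode with a compare-and-swap and short-circuits the job, while on success the last task to finish, detected with atomic_add_fetch_32, fires the user callback exactly once. A simplified sketch of the success-path idiom (the real code above additionally aborts early on the first error):

  if (atomic_add_fetch_32(&pJob->taskDone, 1) == (int32_t)taosArrayGetSize(pJob->pTasks)) {
    // only the thread that completes the last task reaches this point
    (*pJob->userFp)(&pJob->jobRes, pJob->userParam, pJob->rspCode);
  }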
+int32_t ctgHandleGetTbMetaRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
+  int32_t code = 0;
+  SCtgDBCache *dbCache = NULL;
+  CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
+
+  SCtgTbMetaCtx* ctx = (SCtgTbMetaCtx*)pTask->taskCtx;
+  SCatalog* pCtg = pTask->pJob->pCtg;
+  void *pTrans = pTask->pJob->pTrans;
+  const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps;
+
+  switch (reqType) {
+    case TDMT_MND_USE_DB: {
+      SUseDbOutput* pOut = (SUseDbOutput*)pTask->msgCtx.out;
+
+      SVgroupInfo vgInfo = {0};
+      CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, ctx->pName, &vgInfo));
+
+      ctgDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(ctx->pName), ctx->flag);
+
+      CTG_ERR_JRET(ctgGetTbMetaFromVnode(CTG_PARAMS_LIST(), ctx->pName, &vgInfo, NULL, pTask));
+
+      return TSDB_CODE_SUCCESS;
+    }
+    case TDMT_MND_TABLE_META: {
+      STableMetaOutput* pOut = (STableMetaOutput*)pTask->msgCtx.out;
+
+      if (CTG_IS_META_NULL(pOut->metaType)) {
+        if (CTG_FLAG_IS_STB(ctx->flag)) {
+          char dbFName[TSDB_DB_FNAME_LEN] = {0};
+          tNameGetFullDbName(ctx->pName, dbFName);
+
+          CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache));
+          if (NULL != dbCache) {
+            SVgroupInfo vgInfo = {0};
+            CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgInfo, ctx->pName, &vgInfo));
+
+            ctgDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(ctx->pName), ctx->flag);
+
+            CTG_ERR_JRET(ctgGetTbMetaFromVnode(CTG_PARAMS_LIST(), ctx->pName, &vgInfo, NULL, pTask));
+
+            ctgReleaseVgInfo(dbCache);
+            ctgReleaseDBCache(pCtg, dbCache);
+          } else {
+            SBuildUseDBInput input = {0};
+
+            tstrncpy(input.db, dbFName, tListLen(input.db));
+            input.vgVersion = CTG_DEFAULT_INVALID_VERSION;
+
+            CTG_ERR_JRET(ctgGetDBVgInfoFromMnode(pCtg, pTrans, pMgmtEps, &input, NULL, pTask));
+          }
+
+          return TSDB_CODE_SUCCESS;
+        }
+
+        ctgError("no tbmeta got, tbName:%s", tNameGetTableName(ctx->pName));
+        ctgRemoveTbMetaFromCache(pCtg, ctx->pName, false);
+
+        CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST);
+      }
+
+      if (pTask->msgCtx.lastOut) {
+        TSWAP(pTask->msgCtx.out, pTask->msgCtx.lastOut);
+        STableMetaOutput* pLastOut = (STableMetaOutput*)pTask->msgCtx.out;
+        TSWAP(pLastOut->tbMeta, pOut->tbMeta);
+      }
+
+      break;
+    }
+    case TDMT_VND_TABLE_META: {
+      STableMetaOutput* pOut = (STableMetaOutput*)pTask->msgCtx.out;
+
+      if (CTG_IS_META_NULL(pOut->metaType)) {
+        ctgError("no tbmeta got, tbName:%s", tNameGetTableName(ctx->pName));
+        ctgRemoveTbMetaFromCache(pCtg, ctx->pName, false);
+        CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST);
+      }
+
+      if (CTG_FLAG_IS_STB(ctx->flag)) {
+        break;
+      }
+
+      if (CTG_IS_META_TABLE(pOut->metaType) && TSDB_SUPER_TABLE == pOut->tbMeta->tableType) {
+        ctgDebug("will continue to refresh tbmeta since got stb, tbName:%s", tNameGetTableName(ctx->pName));
+
+        taosMemoryFreeClear(pOut->tbMeta);
+
+        CTG_RET(ctgGetTbMetaFromMnode(CTG_PARAMS_LIST(), ctx->pName, NULL, pTask));
+      } else if (CTG_IS_META_BOTH(pOut->metaType)) {
+        int32_t exist = 0;
+        if (!CTG_FLAG_IS_FORCE_UPDATE(ctx->flag)) {
+          CTG_ERR_JRET(ctgTbMetaExistInCache(pCtg, pOut->dbFName, pOut->tbName, &exist));
+        }
+
+        if (0 == exist) {
+          TSWAP(pTask->msgCtx.lastOut, pTask->msgCtx.out);
+          CTG_RET(ctgGetTbMetaFromMnodeImpl(CTG_PARAMS_LIST(), pOut->dbFName, pOut->tbName, NULL, pTask));
+        } else {
+          taosMemoryFreeClear(pOut->tbMeta);
+
+          SET_META_TYPE_CTABLE(pOut->metaType);
+        }
+      }
+      break;
+    }
ctgError("invalid reqType %d", reqType); + CTG_ERR_JRET(TSDB_CODE_INVALID_MSG); + break; + } + + STableMetaOutput* pOut = (STableMetaOutput*)pTask->msgCtx.out; + + ctgUpdateTbMetaToCache(pCtg, pOut, false); + + if (CTG_IS_META_BOTH(pOut->metaType)) { + memcpy(pOut->tbMeta, &pOut->ctbMeta, sizeof(pOut->ctbMeta)); + } else if (CTG_IS_META_CTABLE(pOut->metaType)) { + SName stbName = *ctx->pName; + strcpy(stbName.tname, pOut->tbName); + SCtgTbMetaCtx stbCtx = {0}; + stbCtx.flag = ctx->flag; + stbCtx.pName = &stbName; + + CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &stbCtx, &pOut->tbMeta)); + if (NULL == pOut->tbMeta) { + ctgDebug("stb no longer exist, stbName:%s", stbName.tname); + CTG_ERR_JRET(ctgRelaunchGetTbMetaTask(pTask)); + + return TSDB_CODE_SUCCESS; + } + + memcpy(pOut->tbMeta, &pOut->ctbMeta, sizeof(pOut->ctbMeta)); + } + + TSWAP(pTask->res, pOut->tbMeta); + +_return: + + if (dbCache) { + ctgReleaseVgInfo(dbCache); + ctgReleaseDBCache(pCtg, dbCache); + } + + ctgHandleTaskEnd(pTask, code); + + CTG_RET(code); +} + +int32_t ctgHandleGetDbVgRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) { + int32_t code = 0; + CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target)); + + SCtgDbVgCtx* ctx = (SCtgDbVgCtx*)pTask->taskCtx; + SCatalog* pCtg = pTask->pJob->pCtg; + void *pTrans = pTask->pJob->pTrans; + const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps; + + switch (reqType) { + case TDMT_MND_USE_DB: { + SUseDbOutput* pOut = (SUseDbOutput*)pTask->msgCtx.out; + + CTG_ERR_JRET(ctgGenerateVgList(pCtg, pOut->dbVgroup->vgHash, (SArray**)&pTask->res)); + + CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false)); + pOut->dbVgroup = NULL; + + break; + } + default: + ctgError("invalid reqType %d", reqType); + CTG_ERR_JRET(TSDB_CODE_INVALID_MSG); + break; + } + + +_return: + + ctgHandleTaskEnd(pTask, code); + + CTG_RET(code); +} + +int32_t ctgHandleGetTbHashRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) { + int32_t code = 0; + CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target)); + + SCtgTbHashCtx* ctx = (SCtgTbHashCtx*)pTask->taskCtx; + SCatalog* pCtg = pTask->pJob->pCtg; + void *pTrans = pTask->pJob->pTrans; + const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps; + + switch (reqType) { + case TDMT_MND_USE_DB: { + SUseDbOutput* pOut = (SUseDbOutput*)pTask->msgCtx.out; + + pTask->res = taosMemoryMalloc(sizeof(SVgroupInfo)); + if (NULL == pTask->res) { + CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); + } + + CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, ctx->pName, (SVgroupInfo*)pTask->res)); + + CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false)); + pOut->dbVgroup = NULL; + + break; + } + default: + ctgError("invalid reqType %d", reqType); + CTG_ERR_JRET(TSDB_CODE_INVALID_MSG); + break; + } + + +_return: + + ctgHandleTaskEnd(pTask, code); + + CTG_RET(code); +} + +int32_t ctgHandleGetDbCfgRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) { + int32_t code = 0; + CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target)); + + TSWAP(pTask->res, pTask->msgCtx.out); + +_return: + + ctgHandleTaskEnd(pTask, code); + + CTG_RET(code); +} + +int32_t ctgHandleGetDbInfoRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) { + CTG_RET(TSDB_CODE_APP_ERROR); +} + + 
+int32_t ctgHandleGetQnodeRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) { + int32_t code = 0; + CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target)); + + TSWAP(pTask->res, pTask->msgCtx.out); + +_return: + + ctgHandleTaskEnd(pTask, code); + + CTG_RET(code); +} + +int32_t ctgHandleGetIndexRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) { + int32_t code = 0; + CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target)); + + TSWAP(pTask->res, pTask->msgCtx.out); + +_return: + + ctgHandleTaskEnd(pTask, code); + + CTG_RET(code); +} + +int32_t ctgHandleGetUdfRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) { + int32_t code = 0; + CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target)); + + TSWAP(pTask->res, pTask->msgCtx.out); + +_return: + + ctgHandleTaskEnd(pTask, code); + + CTG_RET(code); +} + +int32_t ctgHandleGetUserRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) { + int32_t code = 0; + SCtgDBCache *dbCache = NULL; + CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target)); + + SCtgUserCtx* ctx = (SCtgUserCtx*)pTask->taskCtx; + SCatalog* pCtg = pTask->pJob->pCtg; + void *pTrans = pTask->pJob->pTrans; + const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps; + bool pass = false; + SGetUserAuthRsp* pOut = (SGetUserAuthRsp*)pTask->msgCtx.out; + + if (pOut->superAuth) { + pass = true; + goto _return; + } + + if (pOut->createdDbs && taosHashGet(pOut->createdDbs, ctx->user.dbFName, strlen(ctx->user.dbFName))) { + pass = true; + goto _return; + } + + if (ctx->user.type == AUTH_TYPE_READ && pOut->readDbs && taosHashGet(pOut->readDbs, ctx->user.dbFName, strlen(ctx->user.dbFName))) { + pass = true; + } else if (ctx->user.type == AUTH_TYPE_WRITE && pOut->writeDbs && taosHashGet(pOut->writeDbs, ctx->user.dbFName, strlen(ctx->user.dbFName))) { + pass = true; + } + +_return: + + if (TSDB_CODE_SUCCESS == code) { + pTask->res = taosMemoryCalloc(1, sizeof(bool)); + if (NULL == pTask->res) { + code = TSDB_CODE_OUT_OF_MEMORY; + } else { + *(bool*)pTask->res = pass; + } + } + + ctgUpdateUserEnqueue(pCtg, pOut, false); + taosMemoryFreeClear(pTask->msgCtx.out); + + ctgHandleTaskEnd(pTask, code); + + CTG_RET(code); +} + +int32_t ctgAsyncRefreshTbMeta(SCtgTask *pTask) { + SCatalog* pCtg = pTask->pJob->pCtg; + void *pTrans = pTask->pJob->pTrans; + const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps; + int32_t code = 0; + SCtgTbMetaCtx* ctx = (SCtgTbMetaCtx*)pTask->taskCtx; + + if (CTG_FLAG_IS_SYS_DB(ctx->flag)) { + ctgDebug("will refresh sys db tbmeta, tbName:%s", tNameGetTableName(ctx->pName)); + + CTG_RET(ctgGetTbMetaFromMnodeImpl(CTG_PARAMS_LIST(), (char *)ctx->pName->dbname, (char *)ctx->pName->tname, NULL, pTask)); + } + + if (CTG_FLAG_IS_STB(ctx->flag)) { + ctgDebug("will refresh tbmeta, supposed to be stb, tbName:%s", tNameGetTableName(ctx->pName)); + + // if get from mnode failed, will not try vnode + CTG_RET(ctgGetTbMetaFromMnode(CTG_PARAMS_LIST(), ctx->pName, NULL, pTask)); + } + + SCtgDBCache *dbCache = NULL; + char dbFName[TSDB_DB_FNAME_LEN] = {0}; + tNameGetFullDbName(ctx->pName, dbFName); + + CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache)); + if (dbCache) { + SVgroupInfo vgInfo = {0}; + CTG_ERR_RET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgInfo, 
ctx->pName, &vgInfo)); + + ctgDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(ctx->pName), ctx->flag); + + CTG_ERR_JRET(ctgGetTbMetaFromVnode(CTG_PARAMS_LIST(), ctx->pName, &vgInfo, NULL, pTask)); + } else { + SBuildUseDBInput input = {0}; + + tstrncpy(input.db, dbFName, tListLen(input.db)); + input.vgVersion = CTG_DEFAULT_INVALID_VERSION; + + CTG_ERR_JRET(ctgGetDBVgInfoFromMnode(pCtg, pTrans, pMgmtEps, &input, NULL, pTask)); + } + +_return: + + if (dbCache) { + ctgReleaseVgInfo(dbCache); + ctgReleaseDBCache(pCtg, dbCache); + } + + CTG_RET(code); +} + +int32_t ctgLaunchGetTbMetaTask(SCtgTask *pTask) { + SCatalog* pCtg = pTask->pJob->pCtg; + void *pTrans = pTask->pJob->pTrans; + const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps; + + CTG_ERR_RET(ctgGetTbMetaFromCache(CTG_PARAMS_LIST(), (SCtgTbMetaCtx*)pTask->taskCtx, (STableMeta**)&pTask->res)); + if (pTask->res) { + CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0)); + return TSDB_CODE_SUCCESS; + } + + CTG_ERR_RET(ctgAsyncRefreshTbMeta(pTask)); + + return TSDB_CODE_SUCCESS; +} + +int32_t ctgLaunchGetDbVgTask(SCtgTask *pTask) { + int32_t code = 0; + SCatalog* pCtg = pTask->pJob->pCtg; + void *pTrans = pTask->pJob->pTrans; + const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps; + SCtgDBCache *dbCache = NULL; + SCtgDbVgCtx* pCtx = (SCtgDbVgCtx*)pTask->taskCtx; + + CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, pCtx->dbFName, &dbCache)); + if (NULL != dbCache) { + CTG_ERR_JRET(ctgGenerateVgList(pCtg, dbCache->vgInfo->vgHash, (SArray**)&pTask->res)); + + CTG_ERR_JRET(ctgHandleTaskEnd(pTask, 0)); + } else { + SBuildUseDBInput input = {0}; + + tstrncpy(input.db, pCtx->dbFName, tListLen(input.db)); + input.vgVersion = CTG_DEFAULT_INVALID_VERSION; + + CTG_ERR_RET(ctgGetDBVgInfoFromMnode(pCtg, pTrans, pMgmtEps, &input, NULL, pTask)); + } + +_return: + + if (dbCache) { + ctgReleaseVgInfo(dbCache); + ctgReleaseDBCache(pCtg, dbCache); + } + + CTG_RET(code); +} + +int32_t ctgLaunchGetTbHashTask(SCtgTask *pTask) { + int32_t code = 0; + SCatalog* pCtg = pTask->pJob->pCtg; + void *pTrans = pTask->pJob->pTrans; + const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps; + SCtgDBCache *dbCache = NULL; + SCtgTbHashCtx* pCtx = (SCtgTbHashCtx*)pTask->taskCtx; + + CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, pCtx->dbFName, &dbCache)); + if (NULL != dbCache) { + pTask->res = taosMemoryMalloc(sizeof(SVgroupInfo)); + if (NULL == pTask->res) { + CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); + } + CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgInfo, pCtx->pName, (SVgroupInfo*)pTask->res)); + + CTG_ERR_JRET(ctgHandleTaskEnd(pTask, 0)); + } else { + SBuildUseDBInput input = {0}; + + tstrncpy(input.db, pCtx->dbFName, tListLen(input.db)); + input.vgVersion = CTG_DEFAULT_INVALID_VERSION; + + CTG_ERR_RET(ctgGetDBVgInfoFromMnode(pCtg, pTrans, pMgmtEps, &input, NULL, pTask)); + } + +_return: + + if (dbCache) { + ctgReleaseVgInfo(dbCache); + ctgReleaseDBCache(pCtg, dbCache); + } + + CTG_RET(code); +} + +int32_t ctgLaunchGetQnodeTask(SCtgTask *pTask) { + SCatalog* pCtg = pTask->pJob->pCtg; + void *pTrans = pTask->pJob->pTrans; + const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps; + + CTG_ERR_RET(ctgGetQnodeListFromMnode(CTG_PARAMS_LIST(), NULL, pTask)); + + return TSDB_CODE_SUCCESS; +} + +int32_t ctgLaunchGetDbCfgTask(SCtgTask *pTask) { + SCatalog* pCtg = pTask->pJob->pCtg; + void *pTrans = pTask->pJob->pTrans; + const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps; + SCtgDbCfgCtx* pCtx = (SCtgDbCfgCtx*)pTask->taskCtx; + + 
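// no cache lookup for db cfg here; it is fetched from the mnode directly +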
CTG_ERR_RET(ctgGetDBCfgFromMnode(CTG_PARAMS_LIST(), pCtx->dbFName, NULL, pTask)); + + return TSDB_CODE_SUCCESS; +} + +int32_t ctgLaunchGetDbInfoTask(SCtgTask *pTask) { + int32_t code = 0; + SCatalog* pCtg = pTask->pJob->pCtg; + void *pTrans = pTask->pJob->pTrans; + const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps; + SCtgDBCache *dbCache = NULL; + SCtgDbInfoCtx* pCtx = (SCtgDbInfoCtx*)pTask->taskCtx; + + pTask->res = taosMemoryCalloc(1, sizeof(SDbInfo)); + if (NULL == pTask->res) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + + SDbInfo* pInfo = (SDbInfo*)pTask->res; + CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, pCtx->dbFName, &dbCache)); + if (NULL != dbCache) { + pInfo->vgVer = dbCache->vgInfo->vgVersion; + pInfo->dbId = dbCache->dbId; + pInfo->tbNum = dbCache->vgInfo->numOfTable; + } else { + pInfo->vgVer = CTG_DEFAULT_INVALID_VERSION; + } + + CTG_ERR_JRET(ctgHandleTaskEnd(pTask, 0)); + +_return: + + if (dbCache) { + ctgReleaseVgInfo(dbCache); + ctgReleaseDBCache(pCtg, dbCache); + } + + CTG_RET(code); +} + +int32_t ctgLaunchGetIndexTask(SCtgTask *pTask) { + SCatalog* pCtg = pTask->pJob->pCtg; + void *pTrans = pTask->pJob->pTrans; + const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps; + SCtgIndexCtx* pCtx = (SCtgIndexCtx*)pTask->taskCtx; + + CTG_ERR_RET(ctgGetIndexInfoFromMnode(CTG_PARAMS_LIST(), pCtx->indexFName, NULL, pTask)); + + return TSDB_CODE_SUCCESS; +} + +int32_t ctgLaunchGetUdfTask(SCtgTask *pTask) { + SCatalog* pCtg = pTask->pJob->pCtg; + void *pTrans = pTask->pJob->pTrans; + const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps; + SCtgUdfCtx* pCtx = (SCtgUdfCtx*)pTask->taskCtx; + + CTG_ERR_RET(ctgGetUdfInfoFromMnode(CTG_PARAMS_LIST(), pCtx->udfName, NULL, pTask)); + + return TSDB_CODE_SUCCESS; +} + +int32_t ctgLaunchGetUserTask(SCtgTask *pTask) { + SCatalog* pCtg = pTask->pJob->pCtg; + void *pTrans = pTask->pJob->pTrans; + const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps; + SCtgUserCtx* pCtx = (SCtgUserCtx*)pTask->taskCtx; + bool inCache = false; + bool pass = false; + + CTG_ERR_RET(ctgChkAuthFromCache(pCtg, pCtx->user.user, pCtx->user.dbFName, pCtx->user.type, &inCache, &pass)); + if (inCache) { + pTask->res = taosMemoryCalloc(1, sizeof(bool)); + if (NULL == pTask->res) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + *(bool*)pTask->res = pass; + + CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0)); + return TSDB_CODE_SUCCESS; + } + + CTG_ERR_RET(ctgGetUserDbAuthFromMnode(CTG_PARAMS_LIST(), pCtx->user.user, NULL, pTask)); + + return TSDB_CODE_SUCCESS; +} + +int32_t ctgRelaunchGetTbMetaTask(SCtgTask *pTask) { + ctgResetTbMetaTask(pTask); + + CTG_ERR_RET(ctgLaunchGetTbMetaTask(pTask)); + + return TSDB_CODE_SUCCESS; +} + +SCtgAsyncFps gCtgAsyncFps[] = { + {ctgLaunchGetQnodeTask, ctgHandleGetQnodeRsp, ctgDumpQnodeRes}, + {ctgLaunchGetDbVgTask, ctgHandleGetDbVgRsp, ctgDumpDbVgRes}, + {ctgLaunchGetDbCfgTask, ctgHandleGetDbCfgRsp, ctgDumpDbCfgRes}, + {ctgLaunchGetDbInfoTask, ctgHandleGetDbInfoRsp, ctgDumpDbInfoRes}, + {ctgLaunchGetTbMetaTask, ctgHandleGetTbMetaRsp, ctgDumpTbMetaRes}, + {ctgLaunchGetTbHashTask, ctgHandleGetTbHashRsp, ctgDumpTbHashRes}, + {ctgLaunchGetIndexTask, ctgHandleGetIndexRsp, ctgDumpIndexRes}, + {ctgLaunchGetUdfTask, ctgHandleGetUdfRsp, ctgDumpUdfRes}, + {ctgLaunchGetUserTask, ctgHandleGetUserRsp, ctgDumpUserRes}, +}; + +int32_t ctgMakeAsyncRes(SCtgJob *pJob) { + int32_t code = 0; + int32_t taskNum = taosArrayGetSize(pJob->pTasks); + + for (int32_t i = 0; i < taskNum; ++i) { + SCtgTask *pTask = taosArrayGet(pJob->pTasks, i); + 
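// copy this task's result into the job-level output via its dump callback +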
CTG_ERR_RET((*gCtgAsyncFps[pTask->type].dumpResFp)(pTask)); + } + + return TSDB_CODE_SUCCESS; +} + + +int32_t ctgLaunchJob(SCtgJob *pJob) { + int32_t taskNum = taosArrayGetSize(pJob->pTasks); + + for (int32_t i = 0; i < taskNum; ++i) { + SCtgTask *pTask = taosArrayGet(pJob->pTasks, i); + + qDebug("QID:%" PRIx64 " start to launch task %d", pJob->queryId, pTask->taskId); + CTG_ERR_RET((*gCtgAsyncFps[pTask->type].launchFp)(pTask)); + } + + return TSDB_CODE_SUCCESS; +} + + + diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c new file mode 100644 index 0000000000000000000000000000000000000000..d1e2056becc86c1ad8f36f4d8ea3bfffe9acb97a --- /dev/null +++ b/source/libs/catalog/src/ctgCache.c @@ -0,0 +1,1609 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include "trpc.h" +#include "query.h" +#include "tname.h" +#include "catalogInt.h" +#include "systable.h" + +SCtgOperation gCtgCacheOperation[CTG_OP_MAX] = { + { + CTG_OP_UPDATE_VGROUP, + "update vgInfo", + ctgOpUpdateVgroup + }, + { + CTG_OP_UPDATE_TB_META, + "update tbMeta", + ctgOpUpdateTbMeta + }, + { + CTG_OP_DROP_DB_CACHE, + "drop DB", + ctgOpDropDbCache + }, + { + CTG_OP_DROP_STB_META, + "drop stbMeta", + ctgOpDropStbMeta + }, + { + CTG_OP_DROP_TB_META, + "drop tbMeta", + ctgOpDropTbMeta + }, + { + CTG_OP_UPDATE_USER, + "update user", + ctgOpUpdateUser + }, + { + CTG_OP_UPDATE_VG_EPSET, + "update epset", + ctgOpUpdateEpset + } + +}; + + + + +int32_t ctgAcquireVgInfo(SCatalog *pCtg, SCtgDBCache *dbCache, bool *inCache) { + CTG_LOCK(CTG_READ, &dbCache->vgLock); + + if (dbCache->deleted) { + CTG_UNLOCK(CTG_READ, &dbCache->vgLock); + + ctgDebug("db is dropping, dbId:%"PRIx64, dbCache->dbId); + + *inCache = false; + return TSDB_CODE_SUCCESS; + } + + + if (NULL == dbCache->vgInfo) { + CTG_UNLOCK(CTG_READ, &dbCache->vgLock); + + *inCache = false; + ctgDebug("db vgInfo is empty, dbId:%"PRIx64, dbCache->dbId); + return TSDB_CODE_SUCCESS; + } + + *inCache = true; + + return TSDB_CODE_SUCCESS; +} + +int32_t ctgWAcquireVgInfo(SCatalog *pCtg, SCtgDBCache *dbCache) { + CTG_LOCK(CTG_WRITE, &dbCache->vgLock); + + if (dbCache->deleted) { + ctgDebug("db is dropping, dbId:%"PRIx64, dbCache->dbId); + CTG_UNLOCK(CTG_WRITE, &dbCache->vgLock); + CTG_ERR_RET(TSDB_CODE_CTG_DB_DROPPED); + } + + return TSDB_CODE_SUCCESS; +} + +void ctgReleaseDBCache(SCatalog *pCtg, SCtgDBCache *dbCache) { + taosHashRelease(pCtg->dbCache, dbCache); +} + +void ctgReleaseVgInfo(SCtgDBCache *dbCache) { + CTG_UNLOCK(CTG_READ, &dbCache->vgLock); +} + +void ctgWReleaseVgInfo(SCtgDBCache *dbCache) { + CTG_UNLOCK(CTG_WRITE, &dbCache->vgLock); +} + + +int32_t ctgAcquireDBCacheImpl(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache, bool acquire) { + char *p = strchr(dbFName, '.'); + if (p && CTG_IS_SYS_DBNAME(p + 1)) { + dbFName = p + 1; + } + + SCtgDBCache *dbCache = NULL; + if (acquire) { + dbCache = (SCtgDBCache *)taosHashAcquire(pCtg->dbCache, dbFName, strlen(dbFName)); + } else { + dbCache = (SCtgDBCache 
*)taosHashGet(pCtg->dbCache, dbFName, strlen(dbFName)); + } + + if (NULL == dbCache) { + *pCache = NULL; + ctgDebug("db not in cache, dbFName:%s", dbFName); + return TSDB_CODE_SUCCESS; + } + + if (dbCache->deleted) { + if (acquire) { + ctgReleaseDBCache(pCtg, dbCache); + } + + *pCache = NULL; + ctgDebug("db is removing from cache, dbFName:%s", dbFName); + return TSDB_CODE_SUCCESS; + } + + *pCache = dbCache; + + return TSDB_CODE_SUCCESS; +} + +int32_t ctgAcquireDBCache(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache) { + CTG_RET(ctgAcquireDBCacheImpl(pCtg, dbFName, pCache, true)); +} + +int32_t ctgGetDBCache(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache) { + CTG_RET(ctgAcquireDBCacheImpl(pCtg, dbFName, pCache, false)); +} + + +int32_t ctgAcquireVgInfoFromCache(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache) { + SCtgDBCache *dbCache = NULL; + + if (NULL == pCtg->dbCache) { + ctgDebug("empty db cache, dbFName:%s", dbFName); + goto _return; + } + + ctgAcquireDBCache(pCtg, dbFName, &dbCache); + if (NULL == dbCache) { + ctgDebug("db %s not in cache", dbFName); + goto _return; + } + + bool inCache = false; + ctgAcquireVgInfo(pCtg, dbCache, &inCache); + if (!inCache) { + ctgDebug("vgInfo of db %s not in cache", dbFName); + goto _return; + } + + *pCache = dbCache; + + CTG_CACHE_STAT_ADD(vgHitNum, 1); + + ctgDebug("Got db vgInfo from cache, dbFName:%s", dbFName); + + return TSDB_CODE_SUCCESS; + +_return: + + if (dbCache) { + ctgReleaseDBCache(pCtg, dbCache); + } + + *pCache = NULL; + + CTG_CACHE_STAT_ADD(vgMissNum, 1); + + return TSDB_CODE_SUCCESS; +} + +int32_t ctgTbMetaExistInCache(SCatalog* pCtg, char *dbFName, char* tbName, int32_t *exist) { + if (NULL == pCtg->dbCache) { + *exist = 0; + ctgWarn("empty db cache, dbFName:%s, tbName:%s", dbFName, tbName); + return TSDB_CODE_SUCCESS; + } + + SCtgDBCache *dbCache = NULL; + ctgAcquireDBCache(pCtg, dbFName, &dbCache); + if (NULL == dbCache) { + *exist = 0; + return TSDB_CODE_SUCCESS; + } + + size_t sz = 0; + CTG_LOCK(CTG_READ, &dbCache->tbCache.metaLock); + STableMeta *tbMeta = taosHashGet(dbCache->tbCache.metaCache, tbName, strlen(tbName)); + CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock); + + if (NULL == tbMeta) { + ctgReleaseDBCache(pCtg, dbCache); + + *exist = 0; + ctgDebug("tbmeta not in cache, dbFName:%s, tbName:%s", dbFName, tbName); + return TSDB_CODE_SUCCESS; + } + + *exist = 1; + + ctgReleaseDBCache(pCtg, dbCache); + + ctgDebug("tbmeta is in cache, dbFName:%s, tbName:%s", dbFName, tbName); + + return TSDB_CODE_SUCCESS; +} + + +int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta) { + int32_t code = 0; + SCtgDBCache *dbCache = NULL; + + *pTableMeta = NULL; + + if (NULL == pCtg->dbCache) { + ctgDebug("empty tbmeta cache, tbName:%s", ctx->pName->tname); + return TSDB_CODE_SUCCESS; + } + + char dbFName[TSDB_DB_FNAME_LEN] = {0}; + if (CTG_FLAG_IS_SYS_DB(ctx->flag)) { + strcpy(dbFName, ctx->pName->dbname); + } else { + tNameGetFullDbName(ctx->pName, dbFName); + } + + ctgAcquireDBCache(pCtg, dbFName, &dbCache); + if (NULL == dbCache) { + ctgDebug("db %d.%s not in cache", ctx->pName->acctId, ctx->pName->dbname); + return TSDB_CODE_SUCCESS; + } + + int32_t sz = 0; + CTG_LOCK(CTG_READ, &dbCache->tbCache.metaLock); + taosHashGetDup_m(dbCache->tbCache.metaCache, ctx->pName->tname, strlen(ctx->pName->tname), (void **)pTableMeta, &sz); + CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock); + + if (NULL == *pTableMeta) { + ctgReleaseDBCache(pCtg, dbCache); + ctgDebug("tbl not in cache, 
dbFName:%s, tbName:%s", dbFName, ctx->pName->tname); + return TSDB_CODE_SUCCESS; + } + + STableMeta* tbMeta = *pTableMeta; + ctx->tbInfo.inCache = true; + ctx->tbInfo.dbId = dbCache->dbId; + ctx->tbInfo.suid = tbMeta->suid; + ctx->tbInfo.tbType = tbMeta->tableType; + + if (tbMeta->tableType != TSDB_CHILD_TABLE) { + ctgReleaseDBCache(pCtg, dbCache); + ctgDebug("Got meta from cache, type:%d, dbFName:%s, tbName:%s", tbMeta->tableType, dbFName, ctx->pName->tname); + + CTG_CACHE_STAT_ADD(tblHitNum, 1); + return TSDB_CODE_SUCCESS; + } + + CTG_LOCK(CTG_READ, &dbCache->tbCache.stbLock); + + STableMeta **stbMeta = taosHashGet(dbCache->tbCache.stbCache, &tbMeta->suid, sizeof(tbMeta->suid)); + if (NULL == stbMeta || NULL == *stbMeta) { + CTG_UNLOCK(CTG_READ, &dbCache->tbCache.stbLock); + ctgError("stb not in stbCache, suid:%"PRIx64, tbMeta->suid); + goto _return; + } + + if ((*stbMeta)->suid != tbMeta->suid) { + CTG_UNLOCK(CTG_READ, &dbCache->tbCache.stbLock); + ctgError("stable suid in stbCache mis-match, expected suid:%"PRIx64 ",actual suid:%"PRIx64, tbMeta->suid, (*stbMeta)->suid); + CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); + } + + int32_t metaSize = CTG_META_SIZE(*stbMeta); + *pTableMeta = taosMemoryRealloc(*pTableMeta, metaSize); + if (NULL == *pTableMeta) { + CTG_UNLOCK(CTG_READ, &dbCache->tbCache.stbLock); + ctgError("realloc size[%d] failed", metaSize); + CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR); + } + + memcpy(&(*pTableMeta)->sversion, &(*stbMeta)->sversion, metaSize - sizeof(SCTableMeta)); + + CTG_UNLOCK(CTG_READ, &dbCache->tbCache.stbLock); + + ctgReleaseDBCache(pCtg, dbCache); + + CTG_CACHE_STAT_ADD(tblHitNum, 1); + + ctgDebug("Got tbmeta from cache, dbFName:%s, tbName:%s", dbFName, ctx->pName->tname); + + return TSDB_CODE_SUCCESS; + +_return: + + ctgReleaseDBCache(pCtg, dbCache); + taosMemoryFreeClear(*pTableMeta); + + CTG_CACHE_STAT_ADD(tblMissNum, 1); + + CTG_RET(code); +} + +int32_t ctgReadTbVerFromCache(SCatalog *pCtg, const SName *pTableName, int32_t *sver, int32_t *tver, int32_t *tbType, uint64_t *suid, + char *stbName) { + *sver = -1; + *tver = -1; + + if (NULL == pCtg->dbCache) { + ctgDebug("empty tbmeta cache, tbName:%s", pTableName->tname); + return TSDB_CODE_SUCCESS; + } + + SCtgDBCache *dbCache = NULL; + char dbFName[TSDB_DB_FNAME_LEN] = {0}; + tNameGetFullDbName(pTableName, dbFName); + + ctgAcquireDBCache(pCtg, dbFName, &dbCache); + if (NULL == dbCache) { + ctgDebug("db %s not in cache", dbFName); + return TSDB_CODE_SUCCESS; + } + + CTG_LOCK(CTG_READ, &dbCache->tbCache.metaLock); + STableMeta *tbMeta = taosHashGet(dbCache->tbCache.metaCache, pTableName->tname, strlen(pTableName->tname)); + if (tbMeta) { + *tbType = tbMeta->tableType; + *suid = tbMeta->suid; + if (*tbType != TSDB_CHILD_TABLE) { + *sver = tbMeta->sversion; + *tver = tbMeta->tversion; + } + } + CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock); + + if (NULL == tbMeta) { + ctgReleaseDBCache(pCtg, dbCache); + return TSDB_CODE_SUCCESS; + } + + if (*tbType != TSDB_CHILD_TABLE) { + ctgReleaseDBCache(pCtg, dbCache); + ctgDebug("Got sver %d tver %d from cache, type:%d, dbFName:%s, tbName:%s", *sver, *tver, *tbType, dbFName, pTableName->tname); + + return TSDB_CODE_SUCCESS; + } + + ctgDebug("Got subtable meta from cache, dbFName:%s, tbName:%s, suid:%" PRIx64, dbFName, pTableName->tname, *suid); + + CTG_LOCK(CTG_READ, &dbCache->tbCache.stbLock); + + STableMeta **stbMeta = taosHashGet(dbCache->tbCache.stbCache, suid, sizeof(*suid)); + if (NULL == stbMeta || NULL == *stbMeta) { + CTG_UNLOCK(CTG_READ, 
&dbCache->tbCache.stbLock); + ctgReleaseDBCache(pCtg, dbCache); + ctgDebug("stb not in stbCache, suid:%" PRIx64, *suid); + return TSDB_CODE_SUCCESS; + } + + if ((*stbMeta)->suid != *suid) { + CTG_UNLOCK(CTG_READ, &dbCache->tbCache.stbLock); + ctgReleaseDBCache(pCtg, dbCache); + ctgError("stable suid in stbCache mis-match, expected suid:%" PRIx64 ",actual suid:%" PRIx64, *suid, + (*stbMeta)->suid); + CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR); + } + + size_t nameLen = 0; + char *name = taosHashGetKey(*stbMeta, &nameLen); + + strncpy(stbName, name, nameLen); + stbName[nameLen] = 0; + + *sver = (*stbMeta)->sversion; + *tver = (*stbMeta)->tversion; + + CTG_UNLOCK(CTG_READ, &dbCache->tbCache.stbLock); + + ctgReleaseDBCache(pCtg, dbCache); + + ctgDebug("Got sver %d tver %d from cache, type:%d, dbFName:%s, tbName:%s", *sver, *tver, *tbType, dbFName, pTableName->tname); + + return TSDB_CODE_SUCCESS; +} + + +int32_t ctgReadTbTypeFromCache(SCatalog* pCtg, const char* dbFName, const char *tableName, int32_t *tbType) { + if (NULL == pCtg->dbCache) { + ctgWarn("empty db cache, dbFName:%s, tbName:%s", dbFName, tableName); + return TSDB_CODE_SUCCESS; + } + + SCtgDBCache *dbCache = NULL; + ctgAcquireDBCache(pCtg, dbFName, &dbCache); + if (NULL == dbCache) { + return TSDB_CODE_SUCCESS; + } + + CTG_LOCK(CTG_READ, &dbCache->tbCache.metaLock); + STableMeta *pTableMeta = (STableMeta *)taosHashAcquire(dbCache->tbCache.metaCache, tableName, strlen(tableName)); + + if (NULL == pTableMeta) { + CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock); + ctgWarn("tbl not in cache, dbFName:%s, tbName:%s", dbFName, tableName); + ctgReleaseDBCache(pCtg, dbCache); + + return TSDB_CODE_SUCCESS; + } + + *tbType = atomic_load_8(&pTableMeta->tableType); + + taosHashRelease(dbCache->tbCache.metaCache, pTableMeta); + + CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock); + + ctgReleaseDBCache(pCtg, dbCache); + + ctgDebug("Got tbtype from cache, dbFName:%s, tbName:%s, type:%d", dbFName, tableName, *tbType); + + return TSDB_CODE_SUCCESS; +} + +int32_t ctgChkAuthFromCache(SCatalog* pCtg, const char* user, const char* dbFName, AUTH_TYPE type, bool *inCache, bool *pass) { + if (NULL == pCtg->userCache) { + ctgDebug("empty user auth cache, user:%s", user); + goto _return; + } + + SCtgUserAuth *pUser = (SCtgUserAuth *)taosHashGet(pCtg->userCache, user, strlen(user)); + if (NULL == pUser) { + ctgDebug("user not in cache, user:%s", user); + goto _return; + } + + *inCache = true; + + ctgDebug("Got user from cache, user:%s", user); + CTG_CACHE_STAT_ADD(userHitNum, 1); + + if (pUser->superUser) { + *pass = true; + return TSDB_CODE_SUCCESS; + } + + CTG_LOCK(CTG_READ, &pUser->lock); + if (pUser->createdDbs && taosHashGet(pUser->createdDbs, dbFName, strlen(dbFName))) { + *pass = true; + CTG_UNLOCK(CTG_READ, &pUser->lock); + return TSDB_CODE_SUCCESS; + } + + if (pUser->readDbs && taosHashGet(pUser->readDbs, dbFName, strlen(dbFName)) && type == AUTH_TYPE_READ) { + *pass = true; + } + + if (pUser->writeDbs && taosHashGet(pUser->writeDbs, dbFName, strlen(dbFName)) && type == AUTH_TYPE_WRITE) { + *pass = true; + } + + CTG_UNLOCK(CTG_READ, &pUser->lock); + + return TSDB_CODE_SUCCESS; + +_return: + + *inCache = false; + CTG_CACHE_STAT_ADD(userMissNum, 1); + + return TSDB_CODE_SUCCESS; +} + + +void ctgWaitOpDone(SCtgCacheOperation *action) { + while (true) { + tsem_wait(&gCtgMgmt.queue.rspSem); + + if (atomic_load_8((int8_t*)&gCtgMgmt.exit)) { + tsem_post(&gCtgMgmt.queue.rspSem); + break; + } + + if (gCtgMgmt.queue.seqDone >= action->seqId) { + break; + } + + 
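// op not done yet: hand the rsp semaphore back to other waiters and retry +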
tsem_post(&gCtgMgmt.queue.rspSem); + sched_yield(); + } +} + +void ctgDequeue(SCtgCacheOperation **op) { + SCtgQNode *orig = gCtgMgmt.queue.head; + + SCtgQNode *node = gCtgMgmt.queue.head->next; + gCtgMgmt.queue.head = gCtgMgmt.queue.head->next; + + CTG_QUEUE_SUB(); + + taosMemoryFreeClear(orig); + + *op = &node->op; +} + + +int32_t ctgEnqueue(SCatalog* pCtg, SCtgCacheOperation *operation) { + SCtgQNode *node = taosMemoryCalloc(1, sizeof(SCtgQNode)); + if (NULL == node) { + qError("calloc %d failed", (int32_t)sizeof(SCtgQNode)); + CTG_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + operation->seqId = atomic_add_fetch_64(&gCtgMgmt.queue.seqId, 1); + + node->op = *operation; + + CTG_LOCK(CTG_WRITE, &gCtgMgmt.queue.qlock); + gCtgMgmt.queue.tail->next = node; + gCtgMgmt.queue.tail = node; + CTG_UNLOCK(CTG_WRITE, &gCtgMgmt.queue.qlock); + + CTG_QUEUE_ADD(); + CTG_RUNTIME_STAT_ADD(qNum, 1); + + tsem_post(&gCtgMgmt.queue.reqSem); + + ctgDebug("action [%s] added into queue", gCtgCacheOperation[operation->opId].name); + + if (operation->syncReq) { + ctgWaitOpDone(operation); + } + + return TSDB_CODE_SUCCESS; +} + + +int32_t ctgDropDbCacheEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId) { + int32_t code = 0; + SCtgCacheOperation action= {.opId = CTG_OP_DROP_DB_CACHE}; + SCtgRemoveDBMsg *msg = taosMemoryMalloc(sizeof(SCtgRemoveDBMsg)); + if (NULL == msg) { + ctgError("malloc %d failed", (int32_t)sizeof(SCtgRemoveDBMsg)); + CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + char *p = strchr(dbFName, '.'); + if (p && CTG_IS_SYS_DBNAME(p + 1)) { + dbFName = p + 1; + } + + msg->pCtg = pCtg; + strncpy(msg->dbFName, dbFName, sizeof(msg->dbFName)); + msg->dbId = dbId; + + action.data = msg; + + CTG_ERR_JRET(ctgEnqueue(pCtg, &action)); + + return TSDB_CODE_SUCCESS; + +_return: + + taosMemoryFreeClear(action.data); + CTG_RET(code); +} + + +int32_t ctgDropStbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid, bool syncReq) { + int32_t code = 0; + SCtgCacheOperation action= {.opId = CTG_OP_DROP_STB_META, .syncReq = syncReq}; + SCtgRemoveStbMsg *msg = taosMemoryMalloc(sizeof(SCtgRemoveStbMsg)); + if (NULL == msg) { + ctgError("malloc %d failed", (int32_t)sizeof(SCtgRemoveStbMsg)); + CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + msg->pCtg = pCtg; + strncpy(msg->dbFName, dbFName, sizeof(msg->dbFName)); + strncpy(msg->stbName, stbName, sizeof(msg->stbName)); + msg->dbId = dbId; + msg->suid = suid; + + action.data = msg; + + CTG_ERR_JRET(ctgEnqueue(pCtg, &action)); + + return TSDB_CODE_SUCCESS; + +_return: + + taosMemoryFreeClear(action.data); + CTG_RET(code); +} + + + +int32_t ctgDropTbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *tbName, bool syncReq) { + int32_t code = 0; + SCtgCacheOperation action= {.opId = CTG_OP_DROP_TB_META, .syncReq = syncReq}; + SCtgRemoveTblMsg *msg = taosMemoryMalloc(sizeof(SCtgRemoveTblMsg)); + if (NULL == msg) { + ctgError("malloc %d failed", (int32_t)sizeof(SCtgRemoveTblMsg)); + CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + msg->pCtg = pCtg; + strncpy(msg->dbFName, dbFName, sizeof(msg->dbFName)); + strncpy(msg->tbName, tbName, sizeof(msg->tbName)); + msg->dbId = dbId; + + action.data = msg; + + CTG_ERR_JRET(ctgEnqueue(pCtg, &action)); + + return TSDB_CODE_SUCCESS; + +_return: + + taosMemoryFreeClear(action.data); + CTG_RET(code); +} + +int32_t ctgUpdateVgroupEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, SDBVgInfo* dbInfo, bool syncReq) { + int32_t code = 0; + SCtgCacheOperation action= {.opId = 
CTG_OP_UPDATE_VGROUP, .syncReq = syncReq}; + SCtgUpdateVgMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateVgMsg)); + if (NULL == msg) { + ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateVgMsg)); + ctgFreeVgInfo(dbInfo); + CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + char *p = strchr(dbFName, '.'); + if (p && CTG_IS_SYS_DBNAME(p + 1)) { + dbFName = p + 1; + } + + strncpy(msg->dbFName, dbFName, sizeof(msg->dbFName)); + msg->pCtg = pCtg; + msg->dbId = dbId; + msg->dbInfo = dbInfo; + + action.data = msg; + + CTG_ERR_JRET(ctgEnqueue(pCtg, &action)); + + return TSDB_CODE_SUCCESS; + +_return: + + ctgFreeVgInfo(dbInfo); + taosMemoryFreeClear(action.data); + CTG_RET(code); +} + +int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool syncReq) { + int32_t code = 0; + SCtgCacheOperation action= {.opId = CTG_OP_UPDATE_TB_META, .syncReq = syncReq}; + SCtgUpdateTblMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateTblMsg)); + if (NULL == msg) { + ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateTblMsg)); + CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + char *p = strchr(output->dbFName, '.'); + if (p && CTG_IS_SYS_DBNAME(p + 1)) { + memmove(output->dbFName, p + 1, strlen(p + 1)); + } + + msg->pCtg = pCtg; + msg->output = output; + + action.data = msg; + + CTG_ERR_JRET(ctgEnqueue(pCtg, &action)); + + return TSDB_CODE_SUCCESS; + +_return: + + taosMemoryFreeClear(msg); + + CTG_RET(code); +} + +int32_t ctgUpdateVgEpsetEnqueue(SCatalog* pCtg, char *dbFName, int32_t vgId, SEpSet* pEpSet) { + int32_t code = 0; + SCtgCacheOperation operation= {.opId = CTG_OP_UPDATE_VG_EPSET}; + SCtgUpdateEpsetMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateEpsetMsg)); + if (NULL == msg) { + ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateEpsetMsg)); + CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + msg->pCtg = pCtg; + strcpy(msg->dbFName, dbFName); + msg->vgId = vgId; + msg->epSet = *pEpSet; + + operation.data = msg; + + CTG_ERR_JRET(ctgEnqueue(pCtg, &operation)); + + return TSDB_CODE_SUCCESS; + +_return: + + taosMemoryFreeClear(msg); + + CTG_RET(code); +} + + + +int32_t ctgUpdateUserEnqueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncReq) { + int32_t code = 0; + SCtgCacheOperation action= {.opId = CTG_OP_UPDATE_USER, .syncReq = syncReq}; + SCtgUpdateUserMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateUserMsg)); + if (NULL == msg) { + ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateUserMsg)); + CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + msg->pCtg = pCtg; + msg->userAuth = *pAuth; + + action.data = msg; + + CTG_ERR_JRET(ctgEnqueue(pCtg, &action)); + + return TSDB_CODE_SUCCESS; + +_return: + + tFreeSGetUserAuthRsp(pAuth); + taosMemoryFreeClear(msg); + + CTG_RET(code); +} + +int32_t ctgMetaRentInit(SCtgRentMgmt *mgmt, uint32_t rentSec, int8_t type) { + mgmt->slotRIdx = 0; + mgmt->slotNum = rentSec / CTG_RENT_SLOT_SECOND; + mgmt->type = type; + + size_t msgSize = sizeof(SCtgRentSlot) * mgmt->slotNum; + + mgmt->slots = taosMemoryCalloc(1, msgSize); + if (NULL == mgmt->slots) { + qError("calloc %d failed", (int32_t)msgSize); + CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + qDebug("meta rent initialized, type:%d, slotNum:%d", type, mgmt->slotNum); + + return TSDB_CODE_SUCCESS; +} + + +int32_t ctgMetaRentAdd(SCtgRentMgmt *mgmt, void *meta, int64_t id, int32_t size) { + int16_t widx = abs((int)(id % mgmt->slotNum)); + + SCtgRentSlot *slot = &mgmt->slots[widx]; + int32_t code = 0; + + CTG_LOCK(CTG_WRITE, &slot->lock); + if (NULL == slot->meta) { + slot->meta = taosArrayInit(CTG_DEFAULT_RENT_SLOT_SIZE, 
size); + if (NULL == slot->meta) { + qError("taosArrayInit %d failed, id:%"PRIx64", slot idx:%d, type:%d", CTG_DEFAULT_RENT_SLOT_SIZE, id, widx, mgmt->type); + CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR); + } + } + + if (NULL == taosArrayPush(slot->meta, meta)) { + qError("taosArrayPush meta to rent failed, id:%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type); + CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR); + } + + slot->needSort = true; + + qDebug("add meta to rent, id:%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type); + +_return: + + CTG_UNLOCK(CTG_WRITE, &slot->lock); + CTG_RET(code); +} + +int32_t ctgMetaRentUpdate(SCtgRentMgmt *mgmt, void *meta, int64_t id, int32_t size, __compar_fn_t sortCompare, __compar_fn_t searchCompare) { + int16_t widx = abs((int)(id % mgmt->slotNum)); + + SCtgRentSlot *slot = &mgmt->slots[widx]; + int32_t code = 0; + + CTG_LOCK(CTG_WRITE, &slot->lock); + if (NULL == slot->meta) { + qError("empty meta slot, id:%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type); + CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); + } + + if (slot->needSort) { + qDebug("meta slot before sort, slot idx:%d, type:%d, size:%d", widx, mgmt->type, (int32_t)taosArrayGetSize(slot->meta)); + taosArraySort(slot->meta, sortCompare); + slot->needSort = false; + qDebug("meta slot sorted, slot idx:%d, type:%d, size:%d", widx, mgmt->type, (int32_t)taosArrayGetSize(slot->meta)); + } + + void *orig = taosArraySearch(slot->meta, &id, searchCompare, TD_EQ); + if (NULL == orig) { + qError("meta not found in slot, id:%"PRIx64", slot idx:%d, type:%d, size:%d", id, widx, mgmt->type, (int32_t)taosArrayGetSize(slot->meta)); + CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); + } + + memcpy(orig, meta, size); + + qDebug("meta in rent updated, id:%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type); + +_return: + + CTG_UNLOCK(CTG_WRITE, &slot->lock); + + if (code) { + qWarn("meta in rent update failed, will try to add it, code:%x, id:%"PRIx64", slot idx:%d, type:%d", code, id, widx, mgmt->type); + CTG_RET(ctgMetaRentAdd(mgmt, meta, id, size)); + } + + CTG_RET(code); +} + +int32_t ctgMetaRentRemove(SCtgRentMgmt *mgmt, int64_t id, __compar_fn_t sortCompare, __compar_fn_t searchCompare) { + int16_t widx = abs((int)(id % mgmt->slotNum)); + + SCtgRentSlot *slot = &mgmt->slots[widx]; + int32_t code = 0; + + CTG_LOCK(CTG_WRITE, &slot->lock); + if (NULL == slot->meta) { + qError("empty meta slot, id:%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type); + CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); + } + + if (slot->needSort) { + taosArraySort(slot->meta, sortCompare); + slot->needSort = false; + qDebug("meta slot sorted, slot idx:%d, type:%d", widx, mgmt->type); + } + + int32_t idx = taosArraySearchIdx(slot->meta, &id, searchCompare, TD_EQ); + if (idx < 0) { + qError("meta not found in slot, id:%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type); + CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); + } + + taosArrayRemove(slot->meta, idx); + + qDebug("meta in rent removed, id:%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type); + +_return: + + CTG_UNLOCK(CTG_WRITE, &slot->lock); + + CTG_RET(code); +} + + +int32_t ctgMetaRentGetImpl(SCtgRentMgmt *mgmt, void **res, uint32_t *num, int32_t size) { + int16_t ridx = atomic_add_fetch_16(&mgmt->slotRIdx, 1); + if (ridx >= mgmt->slotNum) { + ridx %= mgmt->slotNum; + atomic_store_16(&mgmt->slotRIdx, ridx); + } + + SCtgRentSlot *slot = &mgmt->slots[ridx]; + int32_t code = 0; + + CTG_LOCK(CTG_READ, &slot->lock); + if (NULL == slot->meta) { + qDebug("empty meta in slot:%d, 
type:%d", ridx, mgmt->type); + *num = 0; + goto _return; + } + + size_t metaNum = taosArrayGetSize(slot->meta); + if (metaNum <= 0) { + qDebug("no meta in slot:%d, type:%d", ridx, mgmt->type); + *num = 0; + goto _return; + } + + size_t msize = metaNum * size; + *res = taosMemoryMalloc(msize); + if (NULL == *res) { + qError("malloc %d failed", (int32_t)msize); + CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR); + } + + void *meta = taosArrayGet(slot->meta, 0); + + memcpy(*res, meta, msize); + + *num = (uint32_t)metaNum; + + qDebug("Got %d meta from rent, type:%d", (int32_t)metaNum, mgmt->type); + +_return: + + CTG_UNLOCK(CTG_READ, &slot->lock); + + CTG_RET(code); +} + +int32_t ctgMetaRentGet(SCtgRentMgmt *mgmt, void **res, uint32_t *num, int32_t size) { + while (true) { + int64_t msec = taosGetTimestampMs(); + int64_t lsec = atomic_load_64(&mgmt->lastReadMsec); + if ((msec - lsec) < CTG_RENT_SLOT_SECOND * 1000) { + *res = NULL; + *num = 0; + qDebug("too short time period to get expired meta, type:%d", mgmt->type); + return TSDB_CODE_SUCCESS; + } + + if (lsec != atomic_val_compare_exchange_64(&mgmt->lastReadMsec, lsec, msec)) { + continue; + } + + break; + } + + CTG_ERR_RET(ctgMetaRentGetImpl(mgmt, res, num, size)); + + return TSDB_CODE_SUCCESS; +} + +int32_t ctgAddNewDBCache(SCatalog *pCtg, const char *dbFName, uint64_t dbId) { + int32_t code = 0; + + SCtgDBCache newDBCache = {0}; + newDBCache.dbId = dbId; + + newDBCache.tbCache.metaCache = taosHashInit(gCtgMgmt.cfg.maxTblCacheNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK); + if (NULL == newDBCache.tbCache.metaCache) { + ctgError("taosHashInit %d metaCache failed", gCtgMgmt.cfg.maxTblCacheNum); + CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + newDBCache.tbCache.stbCache = taosHashInit(gCtgMgmt.cfg.maxTblCacheNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), true, HASH_ENTRY_LOCK); + if (NULL == newDBCache.tbCache.stbCache) { + ctgError("taosHashInit %d stbCache failed", gCtgMgmt.cfg.maxTblCacheNum); + CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR); + } + + code = taosHashPut(pCtg->dbCache, dbFName, strlen(dbFName), &newDBCache, sizeof(SCtgDBCache)); + if (code) { + if (HASH_NODE_EXIST(code)) { + ctgDebug("db already in cache, dbFName:%s", dbFName); + goto _return; + } + + ctgError("taosHashPut db to cache failed, dbFName:%s", dbFName); + CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR); + } + + CTG_CACHE_STAT_ADD(dbNum, 1); + + SDbVgVersion vgVersion = {.dbId = newDBCache.dbId, .vgVersion = -1}; + strncpy(vgVersion.dbFName, dbFName, sizeof(vgVersion.dbFName)); + + ctgDebug("db added to cache, dbFName:%s, dbId:%"PRIx64, dbFName, dbId); + + CTG_ERR_RET(ctgMetaRentAdd(&pCtg->dbRent, &vgVersion, dbId, sizeof(SDbVgVersion))); + + ctgDebug("db added to rent, dbFName:%s, vgVersion:%d, dbId:%"PRIx64, dbFName, vgVersion.vgVersion, dbId); + + return TSDB_CODE_SUCCESS; + +_return: + + ctgFreeDbCache(&newDBCache); + + CTG_RET(code); +} + + +void ctgRemoveStbRent(SCatalog* pCtg, SCtgTbMetaCache *cache) { + CTG_LOCK(CTG_WRITE, &cache->stbLock); + if (cache->stbCache) { + void *pIter = taosHashIterate(cache->stbCache, NULL); + while (pIter) { + uint64_t *suid = NULL; + suid = taosHashGetKey(pIter, NULL); + + if (TSDB_CODE_SUCCESS == ctgMetaRentRemove(&pCtg->stbRent, *suid, ctgStbVersionSortCompare, ctgStbVersionSearchCompare)) { + ctgDebug("stb removed from rent, suid:%"PRIx64, *suid); + } + + pIter = taosHashIterate(cache->stbCache, pIter); + } + } + CTG_UNLOCK(CTG_WRITE, &cache->stbLock); +} + + +int32_t ctgRemoveDBFromCache(SCatalog* pCtg, 
SCtgDBCache *dbCache, const char* dbFName) { + uint64_t dbId = dbCache->dbId; + + ctgInfo("start to remove db from cache, dbFName:%s, dbId:%"PRIx64, dbFName, dbCache->dbId); + + atomic_store_8(&dbCache->deleted, 1); + + ctgRemoveStbRent(pCtg, &dbCache->tbCache); + + ctgFreeDbCache(dbCache); + + CTG_ERR_RET(ctgMetaRentRemove(&pCtg->dbRent, dbCache->dbId, ctgDbVgVersionSortCompare, ctgDbVgVersionSearchCompare)); + + ctgDebug("db removed from rent, dbFName:%s, dbId:%"PRIx64, dbFName, dbCache->dbId); + + if (taosHashRemove(pCtg->dbCache, dbFName, strlen(dbFName))) { + ctgInfo("taosHashRemove from dbCache failed, may be removed, dbFName:%s", dbFName); + CTG_ERR_RET(TSDB_CODE_CTG_DB_DROPPED); + } + + CTG_CACHE_STAT_SUB(dbNum, 1); + + ctgInfo("db removed from cache, dbFName:%s, dbId:%"PRIx64, dbFName, dbId); + + return TSDB_CODE_SUCCESS; +} + + +int32_t ctgGetAddDBCache(SCatalog* pCtg, const char *dbFName, uint64_t dbId, SCtgDBCache **pCache) { + int32_t code = 0; + SCtgDBCache *dbCache = NULL; + ctgGetDBCache(pCtg, dbFName, &dbCache); + + if (dbCache) { + // TODO OPEN IT +#if 0 + if (dbCache->dbId == dbId) { + *pCache = dbCache; + return TSDB_CODE_SUCCESS; + } +#else + if (0 == dbId) { + *pCache = dbCache; + return TSDB_CODE_SUCCESS; + } + + if (dbId && (dbCache->dbId == 0)) { + dbCache->dbId = dbId; + *pCache = dbCache; + return TSDB_CODE_SUCCESS; + } + + if (dbCache->dbId == dbId) { + *pCache = dbCache; + return TSDB_CODE_SUCCESS; + } +#endif + CTG_ERR_RET(ctgRemoveDBFromCache(pCtg, dbCache, dbFName)); + } + + CTG_ERR_RET(ctgAddNewDBCache(pCtg, dbFName, dbId)); + + ctgGetDBCache(pCtg, dbFName, &dbCache); + + *pCache = dbCache; + + return TSDB_CODE_SUCCESS; +} + + +int32_t ctgWriteDBVgInfoToCache(SCatalog* pCtg, const char* dbFName, uint64_t dbId, SDBVgInfo** pDbInfo) { + int32_t code = 0; + SDBVgInfo* dbInfo = *pDbInfo; + + if (NULL == dbInfo->vgHash) { + return TSDB_CODE_SUCCESS; + } + + if (dbInfo->vgVersion < 0 || taosHashGetSize(dbInfo->vgHash) <= 0) { + ctgError("invalid db vgInfo, dbFName:%s, vgHash:%p, vgVersion:%d, vgHashSize:%d", + dbFName, dbInfo->vgHash, dbInfo->vgVersion, taosHashGetSize(dbInfo->vgHash)); + CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + bool newAdded = false; + SDbVgVersion vgVersion = {.dbId = dbId, .vgVersion = dbInfo->vgVersion, .numOfTable = dbInfo->numOfTable}; + + SCtgDBCache *dbCache = NULL; + CTG_ERR_RET(ctgGetAddDBCache(pCtg, dbFName, dbId, &dbCache)); + if (NULL == dbCache) { + ctgInfo("conflict db update, ignore this update, dbFName:%s, dbId:%"PRIx64, dbFName, dbId); + CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR); + } + + SDBVgInfo *vgInfo = NULL; + CTG_ERR_RET(ctgWAcquireVgInfo(pCtg, dbCache)); + + if (dbCache->vgInfo) { + if (dbInfo->vgVersion < dbCache->vgInfo->vgVersion) { + ctgDebug("db vgVersion is old, dbFName:%s, vgVersion:%d, currentVersion:%d", dbFName, dbInfo->vgVersion, dbCache->vgInfo->vgVersion); + ctgWReleaseVgInfo(dbCache); + + return TSDB_CODE_SUCCESS; + } + + if (dbInfo->vgVersion == dbCache->vgInfo->vgVersion && dbInfo->numOfTable == dbCache->vgInfo->numOfTable) { + ctgDebug("no new db vgVersion or numOfTable, dbFName:%s, vgVersion:%d, numOfTable:%d", dbFName, dbInfo->vgVersion, dbInfo->numOfTable); + ctgWReleaseVgInfo(dbCache); + + return TSDB_CODE_SUCCESS; + } + + ctgFreeVgInfo(dbCache->vgInfo); + } + + dbCache->vgInfo = dbInfo; + + *pDbInfo = NULL; + + ctgDebug("db vgInfo updated, dbFName:%s, vgVersion:%d, dbId:%"PRIx64, dbFName, vgVersion.vgVersion, vgVersion.dbId); + + ctgWReleaseVgInfo(dbCache); + + dbCache = NULL; + + 
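// the cache has taken ownership of dbInfo; now refresh the db vgVersion kept in rent +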
strncpy(vgVersion.dbFName, dbFName, sizeof(vgVersion.dbFName)); + CTG_ERR_RET(ctgMetaRentUpdate(&pCtg->dbRent, &vgVersion, vgVersion.dbId, sizeof(SDbVgVersion), ctgDbVgVersionSortCompare, ctgDbVgVersionSearchCompare)); + + CTG_RET(code); +} + + +int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFName, uint64_t dbId, char *tbName, STableMeta *meta, int32_t metaSize) { + SCtgTbMetaCache *tbCache = &dbCache->tbCache; + + CTG_LOCK(CTG_READ, &tbCache->metaLock); + if (dbCache->deleted || NULL == tbCache->metaCache || NULL == tbCache->stbCache) { + CTG_UNLOCK(CTG_READ, &tbCache->metaLock); + ctgError("db is dropping, dbId:%"PRIx64, dbCache->dbId); + CTG_ERR_RET(TSDB_CODE_CTG_DB_DROPPED); + } + + int8_t origType = 0; + uint64_t origSuid = 0; + bool isStb = meta->tableType == TSDB_SUPER_TABLE; + STableMeta *orig = taosHashGet(tbCache->metaCache, tbName, strlen(tbName)); + if (orig) { + origType = orig->tableType; + + if (origType == meta->tableType && orig->uid == meta->uid && orig->sversion >= meta->sversion && orig->tversion >= meta->tversion) { + CTG_UNLOCK(CTG_READ, &tbCache->metaLock); + return TSDB_CODE_SUCCESS; + } + + if (origType == TSDB_SUPER_TABLE) { + CTG_LOCK(CTG_WRITE, &tbCache->stbLock); + if (taosHashRemove(tbCache->stbCache, &orig->suid, sizeof(orig->suid))) { + ctgError("stb not exist in stbCache, dbFName:%s, stb:%s, suid:%"PRIx64, dbFName, tbName, orig->suid); + } else { + CTG_CACHE_STAT_SUB(stblNum, 1); + } + CTG_UNLOCK(CTG_WRITE, &tbCache->stbLock); + + ctgDebug("stb removed from stbCache, dbFName:%s, stb:%s, suid:%"PRIx64, dbFName, tbName, orig->suid); + + ctgMetaRentRemove(&pCtg->stbRent, orig->suid, ctgStbVersionSortCompare, ctgStbVersionSearchCompare); + + origSuid = orig->suid; + } + } + + if (isStb) { + CTG_LOCK(CTG_WRITE, &tbCache->stbLock); + } + + if (taosHashPut(tbCache->metaCache, tbName, strlen(tbName), meta, metaSize) != 0) { + if (isStb) { + CTG_UNLOCK(CTG_WRITE, &tbCache->stbLock); + } + + CTG_UNLOCK(CTG_READ, &tbCache->metaLock); + ctgError("taosHashPut tbmeta to cache failed, dbFName:%s, tbName:%s, tbType:%d", dbFName, tbName, meta->tableType); + CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + if (NULL == orig) { + CTG_CACHE_STAT_ADD(tblNum, 1); + } + + ctgDebug("tbmeta updated to cache, dbFName:%s, tbName:%s, tbType:%d", dbFName, tbName, meta->tableType); + ctgdShowTableMeta(pCtg, tbName, meta); + + if (!isStb) { + CTG_UNLOCK(CTG_READ, &tbCache->metaLock); + return TSDB_CODE_SUCCESS; + } + + STableMeta *tbMeta = taosHashGet(tbCache->metaCache, tbName, strlen(tbName)); + if (taosHashPut(tbCache->stbCache, &meta->suid, sizeof(meta->suid), &tbMeta, POINTER_BYTES) != 0) { + CTG_UNLOCK(CTG_WRITE, &tbCache->stbLock); + CTG_UNLOCK(CTG_READ, &tbCache->metaLock); + ctgError("taosHashPut stable to stable cache failed, suid:%"PRIx64, meta->suid); + CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + CTG_CACHE_STAT_ADD(stblNum, 1); + + CTG_UNLOCK(CTG_WRITE, &tbCache->stbLock); + + CTG_UNLOCK(CTG_READ, &tbCache->metaLock); + + ctgDebug("stb updated to stbCache, dbFName:%s, tbName:%s, tbType:%d", dbFName, tbName, meta->tableType); + + SSTableMetaVersion metaRent = {.dbId = dbId, .suid = meta->suid, .sversion = meta->sversion, .tversion = meta->tversion}; + strcpy(metaRent.dbFName, dbFName); + strcpy(metaRent.stbName, tbName); + CTG_ERR_RET(ctgMetaRentAdd(&pCtg->stbRent, &metaRent, metaRent.suid, sizeof(SSTableMetaVersion))); + + return TSDB_CODE_SUCCESS; +} + +int32_t ctgUpdateTbMetaToCache(SCatalog* pCtg, STableMetaOutput* pOut, bool syncReq) { + 
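// clone the output first: the queued cache op frees its payload once processed +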
STableMetaOutput* pOutput = NULL; + int32_t code = 0; + + CTG_ERR_RET(ctgCloneMetaOutput(pOut, &pOutput)); + CTG_ERR_JRET(ctgUpdateTbMetaEnqueue(pCtg, pOutput, syncReq)); + + return TSDB_CODE_SUCCESS; + +_return: + + ctgFreeSTableMetaOutput(pOutput); + CTG_RET(code); +} + + +int32_t ctgOpUpdateVgroup(SCtgCacheOperation *operation) { + int32_t code = 0; + SCtgUpdateVgMsg *msg = operation->data; + + CTG_ERR_JRET(ctgWriteDBVgInfoToCache(msg->pCtg, msg->dbFName, msg->dbId, &msg->dbInfo)); + +_return: + + ctgFreeVgInfo(msg->dbInfo); + taosMemoryFreeClear(msg); + + CTG_RET(code); +} + +int32_t ctgOpDropDbCache(SCtgCacheOperation *operation) { + int32_t code = 0; + SCtgRemoveDBMsg *msg = operation->data; + SCatalog* pCtg = msg->pCtg; + + SCtgDBCache *dbCache = NULL; + ctgGetDBCache(msg->pCtg, msg->dbFName, &dbCache); + if (NULL == dbCache) { + goto _return; + } + + if (dbCache->dbId != msg->dbId) { + ctgInfo("dbId already updated, dbFName:%s, dbId:%"PRIx64 ", targetId:%"PRIx64, msg->dbFName, dbCache->dbId, msg->dbId); + goto _return; + } + + CTG_ERR_JRET(ctgRemoveDBFromCache(pCtg, dbCache, msg->dbFName)); + +_return: + + taosMemoryFreeClear(msg); + + CTG_RET(code); +} + + +int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *operation) { + int32_t code = 0; + SCtgUpdateTblMsg *msg = operation->data; + SCatalog* pCtg = msg->pCtg; + STableMetaOutput* output = msg->output; + SCtgDBCache *dbCache = NULL; + + if ((!CTG_IS_META_CTABLE(output->metaType)) && NULL == output->tbMeta) { + ctgError("no valid tbmeta got from meta rsp, dbFName:%s, tbName:%s", output->dbFName, output->tbName); + CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); + } + + if (CTG_IS_META_BOTH(output->metaType) && TSDB_SUPER_TABLE != output->tbMeta->tableType) { + ctgError("table type error, expected:%d, actual:%d", TSDB_SUPER_TABLE, output->tbMeta->tableType); + CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); + } + + CTG_ERR_JRET(ctgGetAddDBCache(pCtg, output->dbFName, output->dbId, &dbCache)); + if (NULL == dbCache) { + ctgInfo("conflict db update, ignore this update, dbFName:%s, dbId:%"PRIx64, output->dbFName, output->dbId); + CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); + } + + if (CTG_IS_META_TABLE(output->metaType) || CTG_IS_META_BOTH(output->metaType)) { + int32_t metaSize = CTG_META_SIZE(output->tbMeta); + + CTG_ERR_JRET(ctgWriteTbMetaToCache(pCtg, dbCache, output->dbFName, output->dbId, output->tbName, output->tbMeta, metaSize)); + } + + if (CTG_IS_META_CTABLE(output->metaType) || CTG_IS_META_BOTH(output->metaType)) { + CTG_ERR_JRET(ctgWriteTbMetaToCache(pCtg, dbCache, output->dbFName, output->dbId, output->ctbName, (STableMeta *)&output->ctbMeta, sizeof(output->ctbMeta))); + } + +_return: + + if (output) { + taosMemoryFreeClear(output->tbMeta); + taosMemoryFreeClear(output); + } + + taosMemoryFreeClear(msg); + + CTG_RET(code); +} + + +int32_t ctgOpDropStbMeta(SCtgCacheOperation *operation) { + int32_t code = 0; + SCtgRemoveStbMsg *msg = operation->data; + SCatalog* pCtg = msg->pCtg; + + SCtgDBCache *dbCache = NULL; + ctgGetDBCache(pCtg, msg->dbFName, &dbCache); + if (NULL == dbCache) { + return TSDB_CODE_SUCCESS; + } + + if (msg->dbId && (dbCache->dbId != msg->dbId)) { + ctgDebug("dbId already modified, dbFName:%s, current:%"PRIx64", dbId:%"PRIx64", stb:%s, suid:%"PRIx64, msg->dbFName, dbCache->dbId, msg->dbId, msg->stbName, msg->suid); + return TSDB_CODE_SUCCESS; + } + + CTG_LOCK(CTG_WRITE, &dbCache->tbCache.stbLock); + if (taosHashRemove(dbCache->tbCache.stbCache, &msg->suid, sizeof(msg->suid))) { + ctgDebug("stb not exist in stbCache, 
may be removed, dbFName:%s, stb:%s, suid:%"PRIx64, msg->dbFName, msg->stbName, msg->suid); + } else { + CTG_CACHE_STAT_SUB(stblNum, 1); + } + + CTG_LOCK(CTG_READ, &dbCache->tbCache.metaLock); + if (taosHashRemove(dbCache->tbCache.metaCache, msg->stbName, strlen(msg->stbName))) { + ctgError("stb not exist in cache, dbFName:%s, stb:%s, suid:%"PRIx64, msg->dbFName, msg->stbName, msg->suid); + } else { + CTG_CACHE_STAT_SUB(tblNum, 1); + } + CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock); + + CTG_UNLOCK(CTG_WRITE, &dbCache->tbCache.stbLock); + + ctgInfo("stb removed from cache, dbFName:%s, stbName:%s, suid:%"PRIx64, msg->dbFName, msg->stbName, msg->suid); + + CTG_ERR_JRET(ctgMetaRentRemove(&msg->pCtg->stbRent, msg->suid, ctgStbVersionSortCompare, ctgStbVersionSearchCompare)); + + ctgDebug("stb removed from rent, dbFName:%s, stbName:%s, suid:%"PRIx64, msg->dbFName, msg->stbName, msg->suid); + +_return: + + taosMemoryFreeClear(msg); + + CTG_RET(code); +} + +int32_t ctgOpDropTbMeta(SCtgCacheOperation *operation) { + int32_t code = 0; + SCtgRemoveTblMsg *msg = operation->data; + SCatalog* pCtg = msg->pCtg; + + SCtgDBCache *dbCache = NULL; + ctgGetDBCache(pCtg, msg->dbFName, &dbCache); + if (NULL == dbCache) { + return TSDB_CODE_SUCCESS; + } + + if (dbCache->dbId != msg->dbId) { + ctgDebug("dbId already modified, dbFName:%s, current:%"PRIx64", dbId:%"PRIx64", tbName:%s", msg->dbFName, dbCache->dbId, msg->dbId, msg->tbName); + return TSDB_CODE_SUCCESS; + } + + CTG_LOCK(CTG_READ, &dbCache->tbCache.metaLock); + if (taosHashRemove(dbCache->tbCache.metaCache, msg->tbName, strlen(msg->tbName))) { + CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock); + ctgError("table not exist in cache, dbFName:%s, tbName:%s", msg->dbFName, msg->tbName); + CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR); + } else { + CTG_CACHE_STAT_SUB(tblNum, 1); + } + CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock); + + ctgInfo("table removed from cache, dbFName:%s, tbName:%s", msg->dbFName, msg->tbName); + +_return: + + taosMemoryFreeClear(msg); + + CTG_RET(code); +} + +int32_t ctgOpUpdateUser(SCtgCacheOperation *operation) { + int32_t code = 0; + SCtgUpdateUserMsg *msg = operation->data; + SCatalog* pCtg = msg->pCtg; + + if (NULL == pCtg->userCache) { + pCtg->userCache = taosHashInit(gCtgMgmt.cfg.maxUserCacheNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK); + if (NULL == pCtg->userCache) { + ctgError("taosHashInit %d user cache failed", gCtgMgmt.cfg.maxUserCacheNum); + CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); + } + } + + SCtgUserAuth *pUser = (SCtgUserAuth *)taosHashGet(pCtg->userCache, msg->userAuth.user, strlen(msg->userAuth.user)); + if (NULL == pUser) { + SCtgUserAuth userAuth = {0}; + + userAuth.version = msg->userAuth.version; + userAuth.superUser = msg->userAuth.superAuth; + userAuth.createdDbs = msg->userAuth.createdDbs; + userAuth.readDbs = msg->userAuth.readDbs; + userAuth.writeDbs = msg->userAuth.writeDbs; + + if (taosHashPut(pCtg->userCache, msg->userAuth.user, strlen(msg->userAuth.user), &userAuth, sizeof(userAuth))) { + ctgError("taosHashPut user %s to cache failed", msg->userAuth.user); + CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); + } + + taosMemoryFreeClear(msg); + + return TSDB_CODE_SUCCESS; + } + + pUser->version = msg->userAuth.version; + + CTG_LOCK(CTG_WRITE, &pUser->lock); + + taosHashCleanup(pUser->createdDbs); + pUser->createdDbs = msg->userAuth.createdDbs; + msg->userAuth.createdDbs = NULL; + + taosHashCleanup(pUser->readDbs); + pUser->readDbs = msg->userAuth.readDbs; + msg->userAuth.readDbs = 
NULL; + + taosHashCleanup(pUser->writeDbs); + pUser->writeDbs = msg->userAuth.writeDbs; + msg->userAuth.writeDbs = NULL; + + CTG_UNLOCK(CTG_WRITE, &pUser->lock); + +_return: + + + taosHashCleanup(msg->userAuth.createdDbs); + taosHashCleanup(msg->userAuth.readDbs); + taosHashCleanup(msg->userAuth.writeDbs); + + taosMemoryFreeClear(msg); + + CTG_RET(code); +} + +int32_t ctgOpUpdateEpset(SCtgCacheOperation *operation) { + int32_t code = 0; + SCtgUpdateEpsetMsg *msg = operation->data; + SCatalog* pCtg = msg->pCtg; + + SCtgDBCache *dbCache = NULL; + CTG_ERR_RET(ctgAcquireDBCache(pCtg, msg->dbFName, &dbCache)); + if (NULL == dbCache) { + ctgDebug("db %s not exist, ignore epset update", msg->dbFName); + goto _return; + } + + SDBVgInfo *vgInfo = NULL; + CTG_ERR_RET(ctgWAcquireVgInfo(pCtg, dbCache)); + + if (NULL == dbCache->vgInfo) { + ctgWReleaseVgInfo(dbCache); + ctgDebug("vgroup in db %s not cached, ignore epset update", msg->dbFName); + goto _return; + } + + SVgroupInfo* pInfo = taosHashGet(dbCache->vgInfo->vgHash, &msg->vgId, sizeof(msg->vgId)); + if (NULL == pInfo) { + ctgWReleaseVgInfo(dbCache); + ctgDebug("no vgroup %d in db %s, ignore epset update", msg->vgId, msg->dbFName); + goto _return; + } + + pInfo->epSet = msg->epSet; + + ctgDebug("epset in vgroup %d updated, dbFName:%s", pInfo->vgId, msg->dbFName); + + ctgWReleaseVgInfo(dbCache); + +_return: + + if (dbCache) { + ctgReleaseDBCache(msg->pCtg, dbCache); + } + + taosMemoryFreeClear(msg); + + CTG_RET(code); +} + + +void ctgUpdateThreadUnexpectedStopped(void) { + if (CTG_IS_LOCKED(&gCtgMgmt.lock) > 0) CTG_UNLOCK(CTG_READ, &gCtgMgmt.lock); +} + +void* ctgUpdateThreadFunc(void* param) { + setThreadName("catalog"); +#ifdef WINDOWS + atexit(ctgUpdateThreadUnexpectedStopped); +#endif + qInfo("catalog update thread started"); + + CTG_LOCK(CTG_READ, &gCtgMgmt.lock); + + while (true) { + if (tsem_wait(&gCtgMgmt.queue.reqSem)) { + qError("ctg tsem_wait failed, error:%s", tstrerror(TAOS_SYSTEM_ERROR(errno))); + } + + if (atomic_load_8((int8_t*)&gCtgMgmt.exit)) { + tsem_post(&gCtgMgmt.queue.rspSem); + break; + } + + SCtgCacheOperation *operation = NULL; + ctgDequeue(&operation); + SCatalog *pCtg = ((SCtgUpdateMsgHeader *)operation->data)->pCtg; + + ctgDebug("process [%s] operation", gCtgCacheOperation[operation->opId].name); + + (*gCtgCacheOperation[operation->opId].func)(operation); + + gCtgMgmt.queue.seqDone = operation->seqId; + + if (operation->syncReq) { + tsem_post(&gCtgMgmt.queue.rspSem); + } + + CTG_RUNTIME_STAT_ADD(qDoneNum, 1); + + ctgdShowClusterCache(pCtg); + } + + if (CTG_IS_LOCKED(&gCtgMgmt.lock)) CTG_UNLOCK(CTG_READ, &gCtgMgmt.lock); + + qInfo("catalog update thread stopped"); + + return NULL; +} + + +int32_t ctgStartUpdateThread() { + TdThreadAttr thAttr; + taosThreadAttrInit(&thAttr); + taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE); + + if (taosThreadCreate(&gCtgMgmt.updateThread, &thAttr, ctgUpdateThreadFunc, NULL) != 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + CTG_ERR_RET(terrno); + } + + taosThreadAttrDestroy(&thAttr); + return TSDB_CODE_SUCCESS; +} + + + diff --git a/source/libs/catalog/src/ctgDbg.c b/source/libs/catalog/src/ctgDbg.c new file mode 100644 index 0000000000000000000000000000000000000000..fdab50db0f65fd67d16d6f5b134f847dc0f882bc --- /dev/null +++ b/source/libs/catalog/src/ctgDbg.c @@ -0,0 +1,408 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include "trpc.h" +#include "query.h" +#include "tname.h" +#include "catalogInt.h" + +extern SCatalogMgmt gCtgMgmt; +SCtgDebug gCTGDebug = {0}; + +void ctgdUserCallback(SMetaData* pResult, void* param, int32_t code) { + ASSERT(*(int32_t*)param == 1); + taosMemoryFree(param); + + qDebug("async call result: %s", tstrerror(code)); + if (NULL == pResult) { + qDebug("empty meta result"); + return; + } + + int32_t num = 0; + + if (pResult->pTableMeta && taosArrayGetSize(pResult->pTableMeta) > 0) { + num = taosArrayGetSize(pResult->pTableMeta); + for (int32_t i = 0; i < num; ++i) { + STableMeta *p = *(STableMeta **)taosArrayGet(pResult->pTableMeta, i); + STableComInfo *c = &p->tableInfo; + + if (TSDB_CHILD_TABLE == p->tableType) { + qDebug("table meta: type:%d, vgId:%d, uid:%" PRIx64 ",suid:%" PRIx64, p->tableType, p->vgId, p->uid, p->suid); + } else { + qDebug("table meta: type:%d, vgId:%d, uid:%" PRIx64 ",suid:%" PRIx64 ",sv:%d, tv:%d, tagNum:%d, precision:%d, colNum:%d, rowSize:%d", + p->tableType, p->vgId, p->uid, p->suid, p->sversion, p->tversion, c->numOfTags, c->precision, c->numOfColumns, c->rowSize); + } + + int32_t colNum = c->numOfColumns + c->numOfTags; + for (int32_t j = 0; j < colNum; ++j) { + SSchema *s = &p->schema[j]; + qDebug("[%d] name:%s, type:%d, colId:%d, bytes:%d", j, s->name, s->type, s->colId, s->bytes); + } + } + } else { + qDebug("empty table meta"); + } + + if (pResult->pDbVgroup && taosArrayGetSize(pResult->pDbVgroup) > 0) { + num = taosArrayGetSize(pResult->pDbVgroup); + for (int32_t i = 0; i < num; ++i) { + SArray *pDb = *(SArray**)taosArrayGet(pResult->pDbVgroup, i); + int32_t vgNum = taosArrayGetSize(pDb); + qDebug("db %d vgInfo:", i); + for (int32_t j = 0; j < vgNum; ++j) { + SVgroupInfo* pInfo = taosArrayGet(pDb, j); + qDebug("vg %d info: vgId:%d", j, pInfo->vgId); + } + } + } else { + qDebug("empty db vgroup"); + } + + if (pResult->pDbInfo && taosArrayGetSize(pResult->pDbInfo) > 0) { + num = taosArrayGetSize(pResult->pDbInfo); + for (int32_t i = 0; i < num; ++i) { + SDbInfo *pDb = taosArrayGet(pResult->pDbInfo, i); + qDebug("db %d dbInfo: vgVer:%d, tbNum:%d, dbId:%" PRIx64, i, pDb->vgVer, pDb->tbNum, pDb->dbId); + } + } else { + qDebug("empty db info"); + } + + if (pResult->pTableHash && taosArrayGetSize(pResult->pTableHash) > 0) { + num = taosArrayGetSize(pResult->pTableHash); + for (int32_t i = 0; i < num; ++i) { + SVgroupInfo* pInfo = taosArrayGet(pResult->pTableHash, i); + qDebug("table %d vg info: vgId:%d", i, pInfo->vgId); + } + } else { + qDebug("empty table hash vgroup"); + } + + if (pResult->pUdfList && taosArrayGetSize(pResult->pUdfList) > 0) { + num = taosArrayGetSize(pResult->pUdfList); + for (int32_t i = 0; i < num; ++i) { + SFuncInfo* pInfo = taosArrayGet(pResult->pUdfList, i); + qDebug("udf %d info: name:%s, funcType:%d", i, pInfo->name, pInfo->funcType); + } + } else { + qDebug("empty udf info"); + } + + if (pResult->pDbCfg && taosArrayGetSize(pResult->pDbCfg) > 0) { + num = 
taosArrayGetSize(pResult->pDbCfg); + for (int32_t i = 0; i < num; ++i) { + SDbCfgInfo* pInfo = taosArrayGet(pResult->pDbCfg, i); + qDebug("db %d info: numOfVgroups:%d, numOfStables:%d", i, pInfo->numOfVgroups, pInfo->numOfStables); + } + } else { + qDebug("empty db cfg info"); + } + + if (pResult->pUser && taosArrayGetSize(pResult->pUser) > 0) { + num = taosArrayGetSize(pResult->pUser); + for (int32_t i = 0; i < num; ++i) { + bool* auth = taosArrayGet(pResult->pUser, i); + qDebug("user auth %d info: %d", i, *auth); + } + } else { + qDebug("empty user auth info"); + } + + if (pResult->pQnodeList && taosArrayGetSize(pResult->pQnodeList) > 0) { + num = taosArrayGetSize(pResult->pQnodeList); + for (int32_t i = 0; i < num; ++i) { + SQueryNodeAddr* qaddr = taosArrayGet(pResult->pQnodeList, i); + qDebug("qnode %d info: id:%d", i, qaddr->nodeId); + } + } else { + qDebug("empty qnode info"); + } +} + +int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, uint64_t reqId) { + int32_t code = 0; + SCatalogReq req = {0}; + req.pTableMeta = taosArrayInit(2, sizeof(SName)); + req.pDbVgroup = taosArrayInit(2, TSDB_DB_FNAME_LEN); + req.pDbInfo = taosArrayInit(2, TSDB_DB_FNAME_LEN); + req.pTableHash = taosArrayInit(2, sizeof(SName)); + req.pUdf = taosArrayInit(2, TSDB_FUNC_NAME_LEN); + req.pDbCfg = taosArrayInit(2, TSDB_DB_FNAME_LEN); + req.pIndex = NULL;//taosArrayInit(2, TSDB_INDEX_FNAME_LEN); + req.pUser = taosArrayInit(2, sizeof(SUserAuthInfo)); + req.qNodeRequired = true; + + SName name = {0}; + char dbFName[TSDB_DB_FNAME_LEN] = {0}; + char funcName[TSDB_FUNC_NAME_LEN] = {0}; + SUserAuthInfo user = {0}; + + tNameFromString(&name, "1.db1.tb1", T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); + taosArrayPush(req.pTableMeta, &name); + taosArrayPush(req.pTableHash, &name); + tNameFromString(&name, "1.db1.st1", T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); + taosArrayPush(req.pTableMeta, &name); + taosArrayPush(req.pTableHash, &name); + + strcpy(dbFName, "1.db1"); + taosArrayPush(req.pDbVgroup, dbFName); + taosArrayPush(req.pDbCfg, dbFName); + taosArrayPush(req.pDbInfo, dbFName); + strcpy(dbFName, "1.db2"); + taosArrayPush(req.pDbVgroup, dbFName); + taosArrayPush(req.pDbCfg, dbFName); + taosArrayPush(req.pDbInfo, dbFName); + + strcpy(funcName, "udf1"); + taosArrayPush(req.pUdf, funcName); + strcpy(funcName, "udf2"); + taosArrayPush(req.pUdf, funcName); + + strcpy(user.user, "root"); + strcpy(user.dbFName, "1.db1"); + user.type = AUTH_TYPE_READ; + taosArrayPush(req.pUser, &user); + user.type = AUTH_TYPE_WRITE; + taosArrayPush(req.pUser, &user); + user.type = AUTH_TYPE_OTHER; + taosArrayPush(req.pUser, &user); + + strcpy(user.user, "user1"); + strcpy(user.dbFName, "1.db2"); + user.type = AUTH_TYPE_READ; + taosArrayPush(req.pUser, &user); + user.type = AUTH_TYPE_WRITE; + taosArrayPush(req.pUser, &user); + user.type = AUTH_TYPE_OTHER; + taosArrayPush(req.pUser, &user); + + int32_t *param = taosMemoryCalloc(1, sizeof(int32_t)); + *param = 1; + + int64_t jobId = 0; + CTG_ERR_JRET(catalogAsyncGetAllMeta(pCtg, pTrans, pMgmtEps, reqId, &req, ctgdUserCallback, param, &jobId)); + +_return: + + taosArrayDestroy(req.pTableMeta); + taosArrayDestroy(req.pDbVgroup); + taosArrayDestroy(req.pDbInfo); + taosArrayDestroy(req.pTableHash); + taosArrayDestroy(req.pUdf); + taosArrayDestroy(req.pDbCfg); + taosArrayDestroy(req.pUser); + + CTG_RET(code); +} + +int32_t ctgdEnableDebug(char *option) { + if (0 == strcasecmp(option, "lock")) { + gCTGDebug.lockEnable = true; + qDebug("lock debug enabled"); + return TSDB_CODE_SUCCESS; + } + + if (0 ==
strcasecmp(option, "cache")) { + gCTGDebug.cacheEnable = true; + qDebug("cache debug enabled"); + return TSDB_CODE_SUCCESS; + } + + if (0 == strcasecmp(option, "api")) { + gCTGDebug.apiEnable = true; + qDebug("api debug enabled"); + return TSDB_CODE_SUCCESS; + } + + if (0 == strcasecmp(option, "meta")) { + gCTGDebug.metaEnable = true; + qDebug("api debug enabled"); + return TSDB_CODE_SUCCESS; + } + + qError("invalid debug option:%s", option); + + return TSDB_CODE_CTG_INTERNAL_ERROR; +} + +int32_t ctgdGetStatNum(char *option, void *res) { + if (0 == strcasecmp(option, "runtime.qDoneNum")) { + *(uint64_t *)res = atomic_load_64(&gCtgMgmt.stat.runtime.qDoneNum); + return TSDB_CODE_SUCCESS; + } + + qError("invalid stat option:%s", option); + + return TSDB_CODE_CTG_INTERNAL_ERROR; +} + +int32_t ctgdGetTbMetaNum(SCtgDBCache *dbCache) { + return dbCache->tbCache.metaCache ? (int32_t)taosHashGetSize(dbCache->tbCache.metaCache) : 0; +} + +int32_t ctgdGetStbNum(SCtgDBCache *dbCache) { + return dbCache->tbCache.stbCache ? (int32_t)taosHashGetSize(dbCache->tbCache.stbCache) : 0; +} + +int32_t ctgdGetRentNum(SCtgRentMgmt *rent) { + int32_t num = 0; + for (uint16_t i = 0; i < rent->slotNum; ++i) { + SCtgRentSlot *slot = &rent->slots[i]; + if (NULL == slot->meta) { + continue; + } + + num += taosArrayGetSize(slot->meta); + } + + return num; +} + +int32_t ctgdGetClusterCacheNum(SCatalog* pCtg, int32_t type) { + if (NULL == pCtg || NULL == pCtg->dbCache) { + return 0; + } + + switch (type) { + case CTG_DBG_DB_NUM: + return (int32_t)taosHashGetSize(pCtg->dbCache); + case CTG_DBG_DB_RENT_NUM: + return ctgdGetRentNum(&pCtg->dbRent); + case CTG_DBG_STB_RENT_NUM: + return ctgdGetRentNum(&pCtg->stbRent); + default: + break; + } + + SCtgDBCache *dbCache = NULL; + int32_t num = 0; + void *pIter = taosHashIterate(pCtg->dbCache, NULL); + while (pIter) { + dbCache = (SCtgDBCache *)pIter; + switch (type) { + case CTG_DBG_META_NUM: + num += ctgdGetTbMetaNum(dbCache); + break; + case CTG_DBG_STB_NUM: + num += ctgdGetStbNum(dbCache); + break; + default: + ctgError("invalid type:%d", type); + break; + } + pIter = taosHashIterate(pCtg->dbCache, pIter); + } + + return num; +} + +void ctgdShowTableMeta(SCatalog* pCtg, const char *tbName, STableMeta* p) { + if (!gCTGDebug.metaEnable) { + return; + } + + STableComInfo *c = &p->tableInfo; + + if (TSDB_CHILD_TABLE == p->tableType) { + ctgDebug("table [%s] meta: type:%d, vgId:%d, uid:%" PRIx64 ",suid:%" PRIx64, tbName, p->tableType, p->vgId, p->uid, p->suid); + return; + } else { + ctgDebug("table [%s] meta: type:%d, vgId:%d, uid:%" PRIx64 ",suid:%" PRIx64 ",sv:%d, tv:%d, tagNum:%d, precision:%d, colNum:%d, rowSize:%d", + tbName, p->tableType, p->vgId, p->uid, p->suid, p->sversion, p->tversion, c->numOfTags, c->precision, c->numOfColumns, c->rowSize); + } + + int32_t colNum = c->numOfColumns + c->numOfTags; + for (int32_t i = 0; i < colNum; ++i) { + SSchema *s = &p->schema[i]; + ctgDebug("[%d] name:%s, type:%d, colId:%d, bytes:%d", i, s->name, s->type, s->colId, s->bytes); + } +} + +void ctgdShowDBCache(SCatalog* pCtg, SHashObj *dbHash) { + if (NULL == dbHash || !gCTGDebug.cacheEnable) { + return; + } + + int32_t i = 0; + SCtgDBCache *dbCache = NULL; + void *pIter = taosHashIterate(dbHash, NULL); + while (pIter) { + char *dbFName = NULL; + size_t len = 0; + + dbCache = (SCtgDBCache *)pIter; + + dbFName = taosHashGetKey(pIter, &len); + + int32_t metaNum = dbCache->tbCache.metaCache ? taosHashGetSize(dbCache->tbCache.metaCache) : 0; + int32_t stbNum = dbCache->tbCache.stbCache ? 
taosHashGetSize(dbCache->tbCache.stbCache) : 0; + int32_t vgVersion = CTG_DEFAULT_INVALID_VERSION; + int32_t hashMethod = -1; + int32_t vgNum = 0; + + if (dbCache->vgInfo) { + vgVersion = dbCache->vgInfo->vgVersion; + hashMethod = dbCache->vgInfo->hashMethod; + if (dbCache->vgInfo->vgHash) { + vgNum = taosHashGetSize(dbCache->vgInfo->vgHash); + } + } + + ctgDebug("[%d] db [%.*s][%"PRIx64"] %s: metaNum:%d, stbNum:%d, vgVersion:%d, hashMethod:%d, vgNum:%d", + i, (int32_t)len, dbFName, dbCache->dbId, dbCache->deleted?"deleted":"", metaNum, stbNum, vgVersion, hashMethod, vgNum); + + pIter = taosHashIterate(dbHash, pIter); + } +} + + + + +void ctgdShowClusterCache(SCatalog* pCtg) { + if (!gCTGDebug.cacheEnable || NULL == pCtg) { + return; + } + + ctgDebug("## cluster %"PRIx64" %p cache Info BEGIN ##", pCtg->clusterId, pCtg); + ctgDebug("db:%d meta:%d stb:%d dbRent:%d stbRent:%d", ctgdGetClusterCacheNum(pCtg, CTG_DBG_DB_NUM), ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM), + ctgdGetClusterCacheNum(pCtg, CTG_DBG_STB_NUM), ctgdGetClusterCacheNum(pCtg, CTG_DBG_DB_RENT_NUM), ctgdGetClusterCacheNum(pCtg, CTG_DBG_STB_RENT_NUM)); + + ctgdShowDBCache(pCtg, pCtg->dbCache); + + ctgDebug("## cluster %"PRIx64" %p cache Info END ##", pCtg->clusterId, pCtg); +} + +int32_t ctgdShowCacheInfo(void) { + if (!gCTGDebug.cacheEnable) { + return TSDB_CODE_CTG_OUT_OF_SERVICE; + } + + CTG_API_ENTER(); + + SCatalog *pCtg = NULL; + void *pIter = taosHashIterate(gCtgMgmt.pCluster, NULL); + while (pIter) { + pCtg = *(SCatalog **)pIter; + + if (pCtg) { + ctgdShowClusterCache(pCtg); + } + + pIter = taosHashIterate(gCtgMgmt.pCluster, pIter); + } + + CTG_API_LEAVE(TSDB_CODE_SUCCESS); +} + diff --git a/source/libs/catalog/src/ctgRemote.c b/source/libs/catalog/src/ctgRemote.c new file mode 100644 index 0000000000000000000000000000000000000000..4def1fff4f3c2185de569a706f59ace1c215d488 --- /dev/null +++ b/source/libs/catalog/src/ctgRemote.c @@ -0,0 +1,585 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "trpc.h" +#include "query.h" +#include "tname.h" +#include "catalogInt.h" +#include "systable.h" +#include "ctgRemote.h" +#include "tref.h" + +int32_t ctgProcessRspMsg(void* out, int32_t reqType, char* msg, int32_t msgSize, int32_t rspCode, char* target) { + int32_t code = 0; + + switch (reqType) { + case TDMT_MND_QNODE_LIST: { + if (TSDB_CODE_SUCCESS != rspCode) { + qError("error rsp for qnode list, error:%s", tstrerror(rspCode)); + CTG_ERR_RET(rspCode); + } + + code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize); + if (code) { + qError("Process qnode list rsp failed, error:%s", tstrerror(rspCode)); + CTG_ERR_RET(code); + } + + qDebug("Got qnode list from mnode, listNum:%d", (int32_t)taosArrayGetSize(out)); + break; + } + case TDMT_MND_USE_DB: { + if (TSDB_CODE_SUCCESS != rspCode) { + qError("error rsp for use db, error:%s, dbFName:%s", tstrerror(rspCode), target); + CTG_ERR_RET(rspCode); + } + + code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize); + if (code) { + qError("Process use db rsp failed, error:%s, dbFName:%s", tstrerror(code), target); + CTG_ERR_RET(code); + } + + qDebug("Got db vgInfo from mnode, dbFName:%s", target); + break; + } + case TDMT_MND_GET_DB_CFG: { + if (TSDB_CODE_SUCCESS != rspCode) { + qError("error rsp for get db cfg, error:%s, db:%s", tstrerror(rspCode), target); + CTG_ERR_RET(rspCode); + } + + code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize); + if (code) { + qError("Process get db cfg rsp failed, error:%s, db:%s", tstrerror(code), target); + CTG_ERR_RET(code); + } + + qDebug("Got db cfg from mnode, dbFName:%s", target); + break; + } + case TDMT_MND_GET_INDEX: { + if (TSDB_CODE_SUCCESS != rspCode) { + qError("error rsp for get index, error:%s, indexName:%s", tstrerror(rspCode), target); + CTG_ERR_RET(rspCode); + } + + code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize); + if (code) { + qError("Process get index rsp failed, error:%s, indexName:%s", tstrerror(code), target); + CTG_ERR_RET(code); + } + + qDebug("Got index from mnode, indexName:%s", target); + break; + } + case TDMT_MND_RETRIEVE_FUNC: { + if (TSDB_CODE_SUCCESS != rspCode) { + qError("error rsp for get udf, error:%s, funcName:%s", tstrerror(rspCode), target); + CTG_ERR_RET(rspCode); + } + + code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize); + if (code) { + qError("Process get udf rsp failed, error:%s, funcName:%s", tstrerror(code), target); + CTG_ERR_RET(code); + } + + qDebug("Got udf from mnode, funcName:%s", target); + break; + } + case TDMT_MND_GET_USER_AUTH: { + if (TSDB_CODE_SUCCESS != rspCode) { + qError("error rsp for get user auth, error:%s, user:%s", tstrerror(rspCode), target); + CTG_ERR_RET(rspCode); + } + + code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize); + if (code) { + qError("Process get user auth rsp failed, error:%s, user:%s", tstrerror(code), target); + CTG_ERR_RET(code); + } + + qDebug("Got user auth from mnode, user:%s", target); + break; + } + case TDMT_MND_TABLE_META: { + if (TSDB_CODE_SUCCESS != rspCode) { + if (CTG_TABLE_NOT_EXIST(rspCode)) { + SET_META_TYPE_NULL(((STableMetaOutput*)out)->metaType); + qDebug("stablemeta not exist in mnode, tbFName:%s", target); + return TSDB_CODE_SUCCESS; + } + + qError("error rsp for stablemeta from mnode, error:%s, tbFName:%s", tstrerror(rspCode), target); + CTG_ERR_RET(rspCode); + } + + code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize); + if (code) { + qError("Process mnode stablemeta rsp failed, error:%s, 
tbFName:%s", tstrerror(code), target); + CTG_ERR_RET(code); + } + + qDebug("Got table meta from mnode, tbFName:%s", target); + break; + } + case TDMT_VND_TABLE_META: { + if (TSDB_CODE_SUCCESS != rspCode) { + if (CTG_TABLE_NOT_EXIST(rspCode)) { + SET_META_TYPE_NULL(((STableMetaOutput*)out)->metaType); + qDebug("tablemeta not exist in vnode, tbFName:%s", target); + return TSDB_CODE_SUCCESS; + } + + qError("error rsp for table meta from vnode, code:%s, tbFName:%s", tstrerror(rspCode), target); + CTG_ERR_RET(rspCode); + } + + code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize); + if (code) { + qError("Process vnode tablemeta rsp failed, code:%s, tbFName:%s", tstrerror(code), target); + CTG_ERR_RET(code); + } + + qDebug("Got table meta from vnode, tbFName:%s", target); + break; + } + } + + return TSDB_CODE_SUCCESS; +} + + +int32_t ctgHandleMsgCallback(void *param, const SDataBuf *pMsg, int32_t rspCode) { + SCtgTaskCallbackParam* cbParam = (SCtgTaskCallbackParam*)param; + int32_t code = 0; + + CTG_API_ENTER(); + + SCtgJob* pJob = taosAcquireRef(gCtgMgmt.jobPool, cbParam->refId); + if (NULL == pJob) { + qDebug("job refId %" PRIx64 " already dropped", cbParam->refId); + goto _return; + } + + SCtgTask *pTask = taosArrayGet(pJob->pTasks, cbParam->taskId); + + qDebug("QID:%" PRIx64 " task %d start to handle rsp %s", pJob->queryId, pTask->taskId, TMSG_INFO(cbParam->reqType + 1)); + + CTG_ERR_JRET((*gCtgAsyncFps[pTask->type].handleRspFp)(pTask, cbParam->reqType, pMsg, rspCode)); + +_return: + + if (pJob) { + taosReleaseRef(gCtgMgmt.jobPool, cbParam->refId); + } + + taosMemoryFree(param); + + CTG_API_LEAVE(code); +} + + +int32_t ctgMakeMsgSendInfo(SCtgTask* pTask, int32_t msgType, SMsgSendInfo **pMsgSendInfo) { + int32_t code = 0; + SMsgSendInfo *msgSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo)); + if (NULL == msgSendInfo) { + qError("calloc %d failed", (int32_t)sizeof(SMsgSendInfo)); + CTG_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + SCtgTaskCallbackParam *param = taosMemoryCalloc(1, sizeof(SCtgTaskCallbackParam)); + if (NULL == param) { + qError("calloc %d failed", (int32_t)sizeof(SCtgTaskCallbackParam)); + CTG_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + param->reqType = msgType; + param->queryId = pTask->pJob->queryId; + param->refId = pTask->pJob->refId; + param->taskId = pTask->taskId; + + msgSendInfo->param = param; + msgSendInfo->fp = ctgHandleMsgCallback; + + *pMsgSendInfo = msgSendInfo; + + return TSDB_CODE_SUCCESS; + +_return: + + taosMemoryFree(param); + taosMemoryFree(msgSendInfo); + + CTG_RET(code); +} + +int32_t ctgAsyncSendMsg(CTG_PARAMS, SCtgTask* pTask, int32_t msgType, void *msg, uint32_t msgSize) { + int32_t code = 0; + SMsgSendInfo *pMsgSendInfo = NULL; + CTG_ERR_JRET(ctgMakeMsgSendInfo(pTask, msgType, &pMsgSendInfo)); + + pMsgSendInfo->msgInfo.pData = msg; + pMsgSendInfo->msgInfo.len = msgSize; + pMsgSendInfo->msgInfo.handle = NULL; + pMsgSendInfo->msgType = msgType; + + int64_t transporterId = 0; + code = asyncSendMsgToServer(pTrans, (SEpSet*)pMgmtEps, &transporterId, pMsgSendInfo); + if (code) { + ctgError("asyncSendMsgToSever failed, error: %s", tstrerror(code)); + CTG_ERR_JRET(code); + } + + ctgDebug("req msg sent, reqId:%" PRIx64 ", msg type:%d, %s", pTask->pJob->queryId, msgType, TMSG_INFO(msgType)); + return TSDB_CODE_SUCCESS; + +_return: + + if (pMsgSendInfo) { + taosMemoryFreeClear(pMsgSendInfo->param); + taosMemoryFreeClear(pMsgSendInfo); + } + + CTG_RET(code); +} + + + + +int32_t ctgGetQnodeListFromMnode(CTG_PARAMS, SArray *out, SCtgTask* pTask) { 
+ char *msg = NULL; + int32_t msgLen = 0; + int32_t reqType = TDMT_MND_QNODE_LIST; + void*(*mallocFp)(int32_t) = pTask ? taosMemoryMalloc : rpcMallocCont; + + ctgDebug("try to get qnode list from mnode, mgmtEpInUse:%d", pMgmtEps->inUse); + + int32_t code = queryBuildMsg[TMSG_INDEX(reqType)](NULL, &msg, 0, &msgLen, mallocFp); + if (code) { + ctgError("Build qnode list msg failed, error:%s", tstrerror(code)); + CTG_ERR_RET(code); + } + + if (pTask) { + void* pOut = taosArrayInit(4, sizeof(struct SQueryNodeAddr)); + if (NULL == pOut) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, NULL)); + CTG_RET(ctgAsyncSendMsg(CTG_PARAMS_LIST(), pTask, reqType, msg, msgLen)); + } + + SRpcMsg rpcMsg = { + .msgType = reqType, + .pCont = msg, + .contLen = msgLen, + }; + + SRpcMsg rpcRsp = {0}; + rpcSendRecv(pTrans, (SEpSet*)pMgmtEps, &rpcMsg, &rpcRsp); + + CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, NULL)); + + return TSDB_CODE_SUCCESS; +} + + +int32_t ctgGetDBVgInfoFromMnode(CTG_PARAMS, SBuildUseDBInput *input, SUseDbOutput *out, SCtgTask* pTask) { + char *msg = NULL; + int32_t msgLen = 0; + int32_t reqType = TDMT_MND_USE_DB; + void*(*mallocFp)(int32_t) = pTask ? taosMemoryMalloc : rpcMallocCont; + + ctgDebug("try to get db vgInfo from mnode, dbFName:%s", input->db); + + int32_t code = queryBuildMsg[TMSG_INDEX(reqType)](input, &msg, 0, &msgLen, mallocFp); + if (code) { + ctgError("Build use db msg failed, code:%x, db:%s", code, input->db); + CTG_ERR_RET(code); + } + + if (pTask) { + void* pOut = taosMemoryCalloc(1, sizeof(SUseDbOutput)); + if (NULL == pOut) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, input->db)); + + CTG_RET(ctgAsyncSendMsg(CTG_PARAMS_LIST(), pTask, reqType, msg, msgLen)); + } + + SRpcMsg rpcMsg = { + .msgType = reqType, + .pCont = msg, + .contLen = msgLen, + }; + + SRpcMsg rpcRsp = {0}; + rpcSendRecv(pTrans, (SEpSet*)pMgmtEps, &rpcMsg, &rpcRsp); + + CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, input->db)); + + return TSDB_CODE_SUCCESS; +} + +int32_t ctgGetDBCfgFromMnode(CTG_PARAMS, const char *dbFName, SDbCfgInfo *out, SCtgTask* pTask) { + char *msg = NULL; + int32_t msgLen = 0; + int32_t reqType = TDMT_MND_GET_DB_CFG; + void*(*mallocFp)(int32_t) = pTask ? 
taosMemoryMalloc : rpcMallocCont; + + ctgDebug("try to get db cfg from mnode, dbFName:%s", dbFName); + + int32_t code = queryBuildMsg[TMSG_INDEX(reqType)]((void *)dbFName, &msg, 0, &msgLen, mallocFp); + if (code) { + ctgError("Build get db cfg msg failed, code:%x, db:%s", code, dbFName); + CTG_ERR_RET(code); + } + + if (pTask) { + void* pOut = taosMemoryCalloc(1, sizeof(SDbCfgInfo)); + if (NULL == pOut) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, (char*)dbFName)); + + CTG_RET(ctgAsyncSendMsg(CTG_PARAMS_LIST(), pTask, reqType, msg, msgLen)); + } + + SRpcMsg rpcMsg = { + .msgType = reqType, + .pCont = msg, + .contLen = msgLen, + }; + + SRpcMsg rpcRsp = {0}; + rpcSendRecv(pTrans, (SEpSet*)pMgmtEps, &rpcMsg, &rpcRsp); + + CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, (char*)dbFName)); + + return TSDB_CODE_SUCCESS; +} + +int32_t ctgGetIndexInfoFromMnode(CTG_PARAMS, const char *indexName, SIndexInfo *out, SCtgTask* pTask) { + char *msg = NULL; + int32_t msgLen = 0; + int32_t reqType = TDMT_MND_GET_INDEX; + void*(*mallocFp)(int32_t) = pTask ? taosMemoryMalloc : rpcMallocCont; + + ctgDebug("try to get index from mnode, indexName:%s", indexName); + + int32_t code = queryBuildMsg[TMSG_INDEX(reqType)]((void *)indexName, &msg, 0, &msgLen, mallocFp); + if (code) { + ctgError("Build get index msg failed, code:%x, indexName:%s", code, indexName); + CTG_ERR_RET(code); + } + + if (pTask) { + void* pOut = taosMemoryCalloc(1, sizeof(SIndexInfo)); + if (NULL == pOut) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, (char*)indexName)); + + CTG_RET(ctgAsyncSendMsg(CTG_PARAMS_LIST(), pTask, reqType, msg, msgLen)); + } + + SRpcMsg rpcMsg = { + .msgType = reqType, + .pCont = msg, + .contLen = msgLen, + }; + + SRpcMsg rpcRsp = {0}; + rpcSendRecv(pTrans, (SEpSet*)pMgmtEps, &rpcMsg, &rpcRsp); + + CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, (char*)indexName)); + + return TSDB_CODE_SUCCESS; +} + +int32_t ctgGetUdfInfoFromMnode(CTG_PARAMS, const char *funcName, SFuncInfo *out, SCtgTask* pTask) { + char *msg = NULL; + int32_t msgLen = 0; + int32_t reqType = TDMT_MND_RETRIEVE_FUNC; + void*(*mallocFp)(int32_t) = pTask ? taosMemoryMalloc : rpcMallocCont; + + ctgDebug("try to get udf info from mnode, funcName:%s", funcName); + + int32_t code = queryBuildMsg[TMSG_INDEX(reqType)]((void *)funcName, &msg, 0, &msgLen, mallocFp); + if (code) { + ctgError("Build get udf msg failed, code:%x, funcName:%s", code, funcName); + CTG_ERR_RET(code); + } + + if (pTask) { + void* pOut = taosMemoryCalloc(1, sizeof(SFuncInfo)); + if (NULL == pOut) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, (char*)funcName)); + + CTG_RET(ctgAsyncSendMsg(CTG_PARAMS_LIST(), pTask, reqType, msg, msgLen)); + } + + SRpcMsg rpcMsg = { + .msgType = reqType, + .pCont = msg, + .contLen = msgLen, + }; + + SRpcMsg rpcRsp = {0}; + rpcSendRecv(pTrans, (SEpSet*)pMgmtEps, &rpcMsg, &rpcRsp); + + CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, (char*)funcName)); + + return TSDB_CODE_SUCCESS; +} + +int32_t ctgGetUserDbAuthFromMnode(CTG_PARAMS, const char *user, SGetUserAuthRsp *out, SCtgTask* pTask) { + char *msg = NULL; + int32_t msgLen = 0; + int32_t reqType = TDMT_MND_GET_USER_AUTH; + void*(*mallocFp)(int32_t) = pTask ?
taosMemoryMalloc : rpcMallocCont; + + ctgDebug("try to get user auth from mnode, user:%s", user); + + int32_t code = queryBuildMsg[TMSG_INDEX(reqType)]((void *)user, &msg, 0, &msgLen, mallocFp); + if (code) { + ctgError("Build get user auth msg failed, code:%x, user:%s", code, user); + CTG_ERR_RET(code); + } + + if (pTask) { + void* pOut = taosMemoryCalloc(1, sizeof(SGetUserAuthRsp)); + if (NULL == pOut) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, (char*)user)); + + CTG_RET(ctgAsyncSendMsg(CTG_PARAMS_LIST(), pTask, reqType, msg, msgLen)); + } + + SRpcMsg rpcMsg = { + .msgType = reqType, + .pCont = msg, + .contLen = msgLen, + }; + + SRpcMsg rpcRsp = {0}; + rpcSendRecv(pTrans, (SEpSet*)pMgmtEps, &rpcMsg, &rpcRsp); + + CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, (char*)user)); + + return TSDB_CODE_SUCCESS; +} + + +int32_t ctgGetTbMetaFromMnodeImpl(CTG_PARAMS, char *dbFName, char* tbName, STableMetaOutput* out, SCtgTask* pTask) { + SBuildTableMetaInput bInput = {.vgId = 0, .dbFName = dbFName, .tbName = tbName}; + char *msg = NULL; + int32_t msgLen = 0; + int32_t reqType = TDMT_MND_TABLE_META; + char tbFName[TSDB_TABLE_FNAME_LEN]; + sprintf(tbFName, "%s.%s", dbFName, tbName); + void*(*mallocFp)(int32_t) = pTask ? taosMemoryMalloc : rpcMallocCont; + + ctgDebug("try to get table meta from mnode, tbFName:%s", tbFName); + + int32_t code = queryBuildMsg[TMSG_INDEX(reqType)](&bInput, &msg, 0, &msgLen, mallocFp); + if (code) { + ctgError("Build mnode stablemeta msg failed, code:%x", code); + CTG_ERR_RET(code); + } + + if (pTask) { + void* pOut = taosMemoryCalloc(1, sizeof(STableMetaOutput)); + if (NULL == pOut) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, tbFName)); + + CTG_RET(ctgAsyncSendMsg(CTG_PARAMS_LIST(), pTask, reqType, msg, msgLen)); + } + + SRpcMsg rpcMsg = { + .msgType = reqType, + .pCont = msg, + .contLen = msgLen, + }; + + SRpcMsg rpcRsp = {0}; + rpcSendRecv(pTrans, (SEpSet*)pMgmtEps, &rpcMsg, &rpcRsp); + + CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, tbFName)); + + return TSDB_CODE_SUCCESS; +} + +int32_t ctgGetTbMetaFromMnode(CTG_PARAMS, const SName* pTableName, STableMetaOutput* out, SCtgTask* pTask) { + char dbFName[TSDB_DB_FNAME_LEN]; + tNameGetFullDbName(pTableName, dbFName); + + return ctgGetTbMetaFromMnodeImpl(CTG_PARAMS_LIST(), dbFName, (char *)pTableName->tname, out, pTask); +} + +int32_t ctgGetTbMetaFromVnode(CTG_PARAMS, const SName* pTableName, SVgroupInfo *vgroupInfo, STableMetaOutput* out, SCtgTask* pTask) { + char dbFName[TSDB_DB_FNAME_LEN]; + tNameGetFullDbName(pTableName, dbFName); + int32_t reqType = TDMT_VND_TABLE_META; + char tbFName[TSDB_TABLE_FNAME_LEN]; + sprintf(tbFName, "%s.%s", dbFName, pTableName->tname); + void*(*mallocFp)(int32_t) = pTask ?
taosMemoryMalloc : rpcMallocCont; + + ctgDebug("try to get table meta from vnode, vgId:%d, tbFName:%s", vgroupInfo->vgId, tbFName); + + SBuildTableMetaInput bInput = {.vgId = vgroupInfo->vgId, .dbFName = dbFName, .tbName = (char *)tNameGetTableName(pTableName)}; + char *msg = NULL; + int32_t msgLen = 0; + + int32_t code = queryBuildMsg[TMSG_INDEX(reqType)](&bInput, &msg, 0, &msgLen, mallocFp); + if (code) { + ctgError("Build vnode tablemeta msg failed, code:%x, tbFName:%s", code, tbFName); + CTG_ERR_RET(code); + } + + if (pTask) { + void* pOut = taosMemoryCalloc(1, sizeof(STableMetaOutput)); + if (NULL == pOut) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, tbFName)); + + CTG_RET(ctgAsyncSendMsg(CTG_PARAMS_LIST(), pTask, reqType, msg, msgLen)); + } + + SRpcMsg rpcMsg = { + .msgType = reqType, + .pCont = msg, + .contLen = msgLen, + }; + + SRpcMsg rpcRsp = {0}; + rpcSendRecv(pTrans, &vgroupInfo->epSet, &rpcMsg, &rpcRsp); + + CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, tbFName)); + + return TSDB_CODE_SUCCESS; +} + + diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c new file mode 100644 index 0000000000000000000000000000000000000000..4fbf1463d8f0191a26c99399f26e66d32b319ca5 --- /dev/null +++ b/source/libs/catalog/src/ctgUtil.c @@ -0,0 +1,590 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "trpc.h" +#include "query.h" +#include "tname.h" +#include "catalogInt.h" +#include "systable.h" + +void ctgFreeSMetaData(SMetaData* pData) { + taosArrayDestroy(pData->pTableMeta); + pData->pTableMeta = NULL; + + for (int32_t i = 0; i < taosArrayGetSize(pData->pDbVgroup); ++i) { + SArray** pArray = taosArrayGet(pData->pDbVgroup, i); + taosArrayDestroy(*pArray); + } + taosArrayDestroy(pData->pDbVgroup); + pData->pDbVgroup = NULL; + + taosArrayDestroy(pData->pTableHash); + pData->pTableHash = NULL; + + taosArrayDestroy(pData->pUdfList); + pData->pUdfList = NULL; + + for (int32_t i = 0; i < taosArrayGetSize(pData->pDbCfg); ++i) { + SDbCfgInfo* pInfo = taosArrayGet(pData->pDbCfg, i); + taosArrayDestroy(pInfo->pRetensions); + } + taosArrayDestroy(pData->pDbCfg); + pData->pDbCfg = NULL; + + taosArrayDestroy(pData->pDbInfo); + pData->pDbInfo = NULL; + + taosArrayDestroy(pData->pIndex); + pData->pIndex = NULL; + + taosArrayDestroy(pData->pUser); + pData->pUser = NULL; + + taosArrayDestroy(pData->pQnodeList); + pData->pQnodeList = NULL; +} + +void ctgFreeSCtgUserAuth(SCtgUserAuth *userCache) { + taosHashCleanup(userCache->createdDbs); + taosHashCleanup(userCache->readDbs); + taosHashCleanup(userCache->writeDbs); +} + +void ctgFreeMetaRent(SCtgRentMgmt *mgmt) { + if (NULL == mgmt->slots) { + return; + } + + for (int32_t i = 0; i < mgmt->slotNum; ++i) { + SCtgRentSlot *slot = &mgmt->slots[i]; + if (slot->meta) { + taosArrayDestroy(slot->meta); + slot->meta = NULL; + } + } + + taosMemoryFreeClear(mgmt->slots); +} + + +void ctgFreeTbMetaCache(SCtgTbMetaCache *cache) { + CTG_LOCK(CTG_WRITE, &cache->stbLock); + if (cache->stbCache) { + int32_t stblNum = taosHashGetSize(cache->stbCache); + taosHashCleanup(cache->stbCache); + cache->stbCache = NULL; + CTG_CACHE_STAT_SUB(stblNum, stblNum); + } + CTG_UNLOCK(CTG_WRITE, &cache->stbLock); + + CTG_LOCK(CTG_WRITE, &cache->metaLock); + if (cache->metaCache) { + int32_t tblNum = taosHashGetSize(cache->metaCache); + taosHashCleanup(cache->metaCache); + cache->metaCache = NULL; + CTG_CACHE_STAT_SUB(tblNum, tblNum); + } + CTG_UNLOCK(CTG_WRITE, &cache->metaLock); +} + +void ctgFreeVgInfo(SDBVgInfo *vgInfo) { + if (NULL == vgInfo) { + return; + } + + if (vgInfo->vgHash) { + taosHashCleanup(vgInfo->vgHash); + vgInfo->vgHash = NULL; + } + + taosMemoryFreeClear(vgInfo); +} + +void ctgFreeDbCache(SCtgDBCache *dbCache) { + if (NULL == dbCache) { + return; + } + + CTG_LOCK(CTG_WRITE, &dbCache->vgLock); + ctgFreeVgInfo (dbCache->vgInfo); + CTG_UNLOCK(CTG_WRITE, &dbCache->vgLock); + + ctgFreeTbMetaCache(&dbCache->tbCache); +} + + +void ctgFreeHandle(SCatalog* pCtg) { + ctgFreeMetaRent(&pCtg->dbRent); + ctgFreeMetaRent(&pCtg->stbRent); + + if (pCtg->dbCache) { + int32_t dbNum = taosHashGetSize(pCtg->dbCache); + + void *pIter = taosHashIterate(pCtg->dbCache, NULL); + while (pIter) { + SCtgDBCache *dbCache = pIter; + + atomic_store_8(&dbCache->deleted, 1); + + ctgFreeDbCache(dbCache); + + pIter = taosHashIterate(pCtg->dbCache, pIter); + } + + taosHashCleanup(pCtg->dbCache); + + CTG_CACHE_STAT_SUB(dbNum, dbNum); + } + + if (pCtg->userCache) { + int32_t userNum = taosHashGetSize(pCtg->userCache); + + void *pIter = taosHashIterate(pCtg->userCache, NULL); + while (pIter) { + SCtgUserAuth *userCache = pIter; + + ctgFreeSCtgUserAuth(userCache); + + pIter = taosHashIterate(pCtg->userCache, pIter); + } + + taosHashCleanup(pCtg->userCache); + + CTG_CACHE_STAT_SUB(userNum, userNum); + } + + taosMemoryFree(pCtg); +} + + +void ctgFreeSUseDbOutput(SUseDbOutput* pOutput) { + if 
(NULL == pOutput) { + return; + } + + if (pOutput->dbVgroup) { + taosHashCleanup(pOutput->dbVgroup->vgHash); + taosMemoryFreeClear(pOutput->dbVgroup); + } + + taosMemoryFree(pOutput); +} + +void ctgFreeMsgCtx(SCtgMsgCtx* pCtx) { + taosMemoryFreeClear(pCtx->target); + if (NULL == pCtx->out) { + return; + } + + switch (pCtx->reqType) { + case TDMT_MND_GET_DB_CFG: { + SDbCfgInfo* pOut = (SDbCfgInfo*)pCtx->out; + taosArrayDestroy(pOut->pRetensions); + taosMemoryFreeClear(pCtx->out); + break; + } + case TDMT_MND_USE_DB:{ + SUseDbOutput* pOut = (SUseDbOutput*)pCtx->out; + ctgFreeSUseDbOutput(pOut); + pCtx->out = NULL; + break; + } + case TDMT_MND_GET_INDEX: { + SIndexInfo* pOut = (SIndexInfo*)pCtx->out; + taosMemoryFreeClear(pCtx->out); + break; + } + case TDMT_MND_QNODE_LIST: { + SArray* pOut = (SArray*)pCtx->out; + taosArrayDestroy(pOut); + pCtx->out = NULL; + break; + } + case TDMT_VND_TABLE_META: + case TDMT_MND_TABLE_META: { + STableMetaOutput* pOut = (STableMetaOutput*)pCtx->out; + taosMemoryFree(pOut->tbMeta); + taosMemoryFreeClear(pCtx->out); + break; + } + case TDMT_MND_RETRIEVE_FUNC: { + SFuncInfo* pOut = (SFuncInfo*)pCtx->out; + taosMemoryFree(pOut->pCode); + taosMemoryFree(pOut->pComment); + taosMemoryFreeClear(pCtx->out); + break; + } + case TDMT_MND_GET_USER_AUTH: { + SGetUserAuthRsp* pOut = (SGetUserAuthRsp*)pCtx->out; + taosHashCleanup(pOut->createdDbs); + taosHashCleanup(pOut->readDbs); + taosHashCleanup(pOut->writeDbs); + taosMemoryFreeClear(pCtx->out); + break; + } + default: + qError("invalid reqType %d", pCtx->reqType); + break; + } +} + +void ctgFreeSTableMetaOutput(STableMetaOutput* pOutput) { + if (NULL == pOutput) { + return; + } + + taosMemoryFree(pOutput->tbMeta); + taosMemoryFree(pOutput); +} + + +void ctgResetTbMetaTask(SCtgTask* pTask) { + SCtgTbMetaCtx* taskCtx = (SCtgTbMetaCtx*)pTask->taskCtx; + memset(&taskCtx->tbInfo, 0, sizeof(taskCtx->tbInfo)); + taskCtx->flag = CTG_FLAG_UNKNOWN_STB; + + if (pTask->msgCtx.lastOut) { + ctgFreeSTableMetaOutput((STableMetaOutput*)pTask->msgCtx.lastOut); + pTask->msgCtx.lastOut = NULL; + } + if (pTask->msgCtx.out) { + ctgFreeSTableMetaOutput((STableMetaOutput*)pTask->msgCtx.out); + pTask->msgCtx.out = NULL; + } + taosMemoryFreeClear(pTask->msgCtx.target); + taosMemoryFreeClear(pTask->res); +} + +void ctgFreeTask(SCtgTask* pTask) { + ctgFreeMsgCtx(&pTask->msgCtx); + + switch (pTask->type) { + case CTG_TASK_GET_QNODE: { + taosArrayDestroy((SArray*)pTask->res); + taosMemoryFreeClear(pTask->taskCtx); + pTask->res = NULL; + break; + } + case CTG_TASK_GET_TB_META: { + SCtgTbMetaCtx* taskCtx = (SCtgTbMetaCtx*)pTask->taskCtx; + taosMemoryFreeClear(taskCtx->pName); + if (pTask->msgCtx.lastOut) { + ctgFreeSTableMetaOutput((STableMetaOutput*)pTask->msgCtx.lastOut); + pTask->msgCtx.lastOut = NULL; + } + taosMemoryFreeClear(pTask->taskCtx); + taosMemoryFreeClear(pTask->res); + break; + } + case CTG_TASK_GET_DB_VGROUP: { + taosArrayDestroy((SArray*)pTask->res); + taosMemoryFreeClear(pTask->taskCtx); + pTask->res = NULL; + break; + } + case CTG_TASK_GET_DB_CFG: { + taosMemoryFreeClear(pTask->taskCtx); + taosMemoryFreeClear(pTask->res); + break; + } + case CTG_TASK_GET_DB_INFO: { + taosMemoryFreeClear(pTask->taskCtx); + taosMemoryFreeClear(pTask->res); + break; + } + case CTG_TASK_GET_TB_HASH: { + SCtgTbHashCtx* taskCtx = (SCtgTbHashCtx*)pTask->taskCtx; + taosMemoryFreeClear(taskCtx->pName); + taosMemoryFreeClear(pTask->taskCtx); + taosMemoryFreeClear(pTask->res); + break; + } + case CTG_TASK_GET_INDEX: { + taosMemoryFreeClear(pTask->taskCtx); + 
taosMemoryFreeClear(pTask->res); + break; + } + case CTG_TASK_GET_UDF: { + taosMemoryFreeClear(pTask->taskCtx); + taosMemoryFreeClear(pTask->res); + break; + } + case CTG_TASK_GET_USER: { + taosMemoryFreeClear(pTask->taskCtx); + taosMemoryFreeClear(pTask->res); + break; + } + default: + qError("invalid task type %d", pTask->type); + break; + } +} + +void ctgFreeTasks(SArray* pArray) { + if (NULL == pArray) { + return; + } + + int32_t num = taosArrayGetSize(pArray); + for (int32_t i = 0; i < num; ++i) { + SCtgTask* pTask = taosArrayGet(pArray, i); + ctgFreeTask(pTask); + } + + taosArrayDestroy(pArray); +} + +void ctgFreeJob(void* job) { + if (NULL == job) { + return; + } + + SCtgJob* pJob = (SCtgJob*)job; + + int64_t rid = pJob->refId; + uint64_t qid = pJob->queryId; + + ctgFreeTasks(pJob->pTasks); + + ctgFreeSMetaData(&pJob->jobRes); + + taosMemoryFree(job); + + qDebug("QID:%" PRIx64 ", job %" PRIx64 " freed", qid, rid); +} + +int32_t ctgUpdateMsgCtx(SCtgMsgCtx* pCtx, int32_t reqType, void* out, char* target) { + ctgFreeMsgCtx(pCtx); + + pCtx->reqType = reqType; + pCtx->out = out; + if (target) { + pCtx->target = strdup(target); + if (NULL == pCtx->target) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + } else { + pCtx->target = NULL; + } + + return TSDB_CODE_SUCCESS; +} + + +int32_t ctgGetHashFunction(int8_t hashMethod, tableNameHashFp *fp) { + switch (hashMethod) { + default: + *fp = MurmurHash3_32; + break; + } + + return TSDB_CODE_SUCCESS; +} + +int32_t ctgGenerateVgList(SCatalog *pCtg, SHashObj *vgHash, SArray** pList) { + SHashObj *vgroupHash = NULL; + SVgroupInfo *vgInfo = NULL; + SArray *vgList = NULL; + int32_t code = 0; + int32_t vgNum = taosHashGetSize(vgHash); + + vgList = taosArrayInit(vgNum, sizeof(SVgroupInfo)); + if (NULL == vgList) { + ctgError("taosArrayInit failed, num:%d", vgNum); + CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + void *pIter = taosHashIterate(vgHash, NULL); + while (pIter) { + vgInfo = pIter; + + if (NULL == taosArrayPush(vgList, vgInfo)) { + ctgError("taosArrayPush failed, vgId:%d", vgInfo->vgId); + taosHashCancelIterate(vgHash, pIter); + CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR); + } + + pIter = taosHashIterate(vgHash, pIter); + vgInfo = NULL; + } + + *pList = vgList; + + ctgDebug("Got vgList from cache, vgNum:%d", vgNum); + + return TSDB_CODE_SUCCESS; + +_return: + + if (vgList) { + taosArrayDestroy(vgList); + } + + CTG_RET(code); +} + + +int32_t ctgGetVgInfoFromHashValue(SCatalog *pCtg, SDBVgInfo *dbInfo, const SName *pTableName, SVgroupInfo *pVgroup) { + int32_t code = 0; + + int32_t vgNum = taosHashGetSize(dbInfo->vgHash); + char db[TSDB_DB_FNAME_LEN] = {0}; + tNameGetFullDbName(pTableName, db); + + if (vgNum <= 0) { + ctgError("db vgroup cache invalid, db:%s, vgroup number:%d", db, vgNum); + CTG_ERR_RET(TSDB_CODE_TSC_DB_NOT_SELECTED); + } + + tableNameHashFp fp = NULL; + SVgroupInfo *vgInfo = NULL; + + CTG_ERR_RET(ctgGetHashFunction(dbInfo->hashMethod, &fp)); + + char tbFullName[TSDB_TABLE_FNAME_LEN]; + tNameExtractFullName(pTableName, tbFullName); + + uint32_t hashValue = (*fp)(tbFullName, (uint32_t)strlen(tbFullName)); + + void *pIter = taosHashIterate(dbInfo->vgHash, NULL); + while (pIter) { + vgInfo = pIter; + if (hashValue >= vgInfo->hashBegin && hashValue <= vgInfo->hashEnd) { + taosHashCancelIterate(dbInfo->vgHash, pIter); + break; + } + + pIter = taosHashIterate(dbInfo->vgHash, pIter); + vgInfo = NULL; + } + + if (NULL == vgInfo) { + ctgError("no hash range found for hash value [%u], db:%s, numOfVgId:%d", hashValue, db, 
taosHashGetSize(dbInfo->vgHash)); + CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR); + } + + *pVgroup = *vgInfo; + + CTG_RET(code); +} + +int32_t ctgStbVersionSearchCompare(const void* key1, const void* key2) { + if (*(uint64_t *)key1 < ((SSTableMetaVersion*)key2)->suid) { + return -1; + } else if (*(uint64_t *)key1 > ((SSTableMetaVersion*)key2)->suid) { + return 1; + } else { + return 0; + } +} + +int32_t ctgDbVgVersionSearchCompare(const void* key1, const void* key2) { + if (*(int64_t *)key1 < ((SDbVgVersion*)key2)->dbId) { + return -1; + } else if (*(int64_t *)key1 > ((SDbVgVersion*)key2)->dbId) { + return 1; + } else { + return 0; + } +} + +int32_t ctgStbVersionSortCompare(const void* key1, const void* key2) { + if (((SSTableMetaVersion*)key1)->suid < ((SSTableMetaVersion*)key2)->suid) { + return -1; + } else if (((SSTableMetaVersion*)key1)->suid > ((SSTableMetaVersion*)key2)->suid) { + return 1; + } else { + return 0; + } +} + +int32_t ctgDbVgVersionSortCompare(const void* key1, const void* key2) { + if (((SDbVgVersion*)key1)->dbId < ((SDbVgVersion*)key2)->dbId) { + return -1; + } else if (((SDbVgVersion*)key1)->dbId > ((SDbVgVersion*)key2)->dbId) { + return 1; + } else { + return 0; + } +} + + + + +int32_t ctgCloneVgInfo(SDBVgInfo *src, SDBVgInfo **dst) { + *dst = taosMemoryMalloc(sizeof(SDBVgInfo)); + if (NULL == *dst) { + qError("malloc %d failed", (int32_t)sizeof(SDBVgInfo)); + CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + memcpy(*dst, src, sizeof(SDBVgInfo)); + + size_t hashSize = taosHashGetSize(src->vgHash); + (*dst)->vgHash = taosHashInit(hashSize, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK); + if (NULL == (*dst)->vgHash) { + qError("taosHashInit %d failed", (int32_t)hashSize); + taosMemoryFreeClear(*dst); + CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + int32_t *vgId = NULL; + void *pIter = taosHashIterate(src->vgHash, NULL); + while (pIter) { + vgId = taosHashGetKey(pIter, NULL); + + if (taosHashPut((*dst)->vgHash, (void *)vgId, sizeof(int32_t), pIter, sizeof(SVgroupInfo))) { + qError("taosHashPut failed, hashSize:%d", (int32_t)hashSize); + taosHashCancelIterate(src->vgHash, pIter); + taosHashCleanup((*dst)->vgHash); + taosMemoryFreeClear(*dst); + CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + pIter = taosHashIterate(src->vgHash, pIter); + } + + + return TSDB_CODE_SUCCESS; +} + + + +int32_t ctgCloneMetaOutput(STableMetaOutput *output, STableMetaOutput **pOutput) { + *pOutput = taosMemoryMalloc(sizeof(STableMetaOutput)); + if (NULL == *pOutput) { + qError("malloc %d failed", (int32_t)sizeof(STableMetaOutput)); + CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + memcpy(*pOutput, output, sizeof(STableMetaOutput)); + + if (output->tbMeta) { + int32_t metaSize = CTG_META_SIZE(output->tbMeta); + (*pOutput)->tbMeta = taosMemoryMalloc(metaSize); + if (NULL == (*pOutput)->tbMeta) { + qError("malloc %d failed", (int32_t)sizeof(STableMetaOutput)); + taosMemoryFreeClear(*pOutput); + CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + memcpy((*pOutput)->tbMeta, output->tbMeta, metaSize); + } + + return TSDB_CODE_SUCCESS; +} + + + diff --git a/source/libs/catalog/test/catalogTests.cpp b/source/libs/catalog/test/catalogTests.cpp index cff0087d6ccc7dbab5f690ffe1cecc27ec4813b7..81d206a0f3fee7f33f24b9740c973ab8d89b10d1 100644 --- a/source/libs/catalog/test/catalogTests.cpp +++ b/source/libs/catalog/test/catalogTests.cpp @@ -40,10 +40,7 @@ namespace { -extern "C" int32_t ctgGetTableMetaFromCache(struct SCatalog *pCatalog, const SName *pTableName, STableMeta **pTableMeta, - bool 
*inCache, int32_t flag, uint64_t *dbId); extern "C" int32_t ctgdGetClusterCacheNum(struct SCatalog* pCatalog, int32_t type); -extern "C" int32_t ctgActUpdateTbl(SCtgMetaAction *action); extern "C" int32_t ctgdEnableDebug(char *option); extern "C" int32_t ctgdGetStatNum(char *option, void *res); @@ -52,7 +49,7 @@ void ctgTestSetRspCTableMeta(); void ctgTestSetRspSTableMeta(); void ctgTestSetRspMultiSTableMeta(); -extern "C" SCatalogMgmt gCtgMgmt; +//extern "C" SCatalogMgmt gCtgMgmt; enum { CTGT_RSP_VGINFO = 1, @@ -859,8 +856,12 @@ void *ctgTestGetCtableMetaThread(void *param) { strcpy(cn.dbname, "db1"); strcpy(cn.tname, ctgTestCTablename); + SCtgTbMetaCtx ctx = {0}; + ctx.pName = &cn; + ctx.flag = CTG_FLAG_UNKNOWN_STB; + while (!ctgTestStop) { - code = ctgGetTableMetaFromCache(pCtg, &cn, &tbMeta, &inCache, 0, NULL); + code = ctgReadTbMetaFromCache(pCtg, &ctx, &tbMeta); if (code || !inCache) { assert(0); } @@ -886,9 +887,9 @@ void *ctgTestSetCtableMetaThread(void *param) { int32_t n = 0; STableMetaOutput *output = NULL; - SCtgMetaAction action = {0}; + SCtgCacheOperation operation = {0}; - action.act = CTG_ACT_UPDATE_TBL; + operation.opId = CTG_OP_UPDATE_TB_META; while (!ctgTestStop) { output = (STableMetaOutput *)taosMemoryMalloc(sizeof(STableMetaOutput)); @@ -897,9 +898,9 @@ void *ctgTestSetCtableMetaThread(void *param) { SCtgUpdateTblMsg *msg = (SCtgUpdateTblMsg *)taosMemoryMalloc(sizeof(SCtgUpdateTblMsg)); msg->pCtg = pCtg; msg->output = output; - action.data = msg; + operation.data = msg; - code = ctgActUpdateTbl(&action); + code = ctgOpUpdateTbMeta(&operation); if (code) { assert(0); } diff --git a/source/libs/command/inc/commandInt.h b/source/libs/command/inc/commandInt.h index 775dee28a4e2528cbf8509cb5955d567b658a5dd..100e35bc3c61015c1c109adef95851de73d1e3a0 100644 --- a/source/libs/command/inc/commandInt.h +++ b/source/libs/command/inc/commandInt.h @@ -36,6 +36,8 @@ extern "C" { #define EXPLAIN_SORT_FORMAT "Sort" #define EXPLAIN_INTERVAL_FORMAT "Interval on Column %s" #define EXPLAIN_SESSION_FORMAT "Session" +#define EXPLAIN_STATE_WINDOW_FORMAT "StateWindow on Column %s" +#define EXPLAIN_PARITION_FORMAT "Partition on Column %s" #define EXPLAIN_ORDER_FORMAT "Order: %s" #define EXPLAIN_FILTER_FORMAT "Filter: " #define EXPLAIN_FILL_FORMAT "Fill: %s" @@ -60,7 +62,7 @@ extern "C" { #define EXPLAIN_GROUPS_FORMAT "groups=%d" #define EXPLAIN_WIDTH_FORMAT "width=%d" #define EXPLAIN_FUNCTIONS_FORMAT "functions=%d" -#define EXPLAIN_EXECINFO_FORMAT "cost=%" PRIu64 "..%" PRIu64 " rows=%" PRIu64 +#define EXPLAIN_EXECINFO_FORMAT "cost=%.3f..%.3f rows=%" PRIu64 typedef struct SExplainGroup { int32_t nodeNum; diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index 621ea7b7fc7483dd6b073cf6301bf373e154e634..3034b4b02a293dd4cce91814bde367682db6ac8d 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -21,6 +21,7 @@ static int32_t getSchemaBytes(const SSchema* pSchema) { case TSDB_DATA_TYPE_BINARY: return (pSchema->bytes - VARSTR_HEADER_SIZE); case TSDB_DATA_TYPE_NCHAR: + case TSDB_DATA_TYPE_JSON: return (pSchema->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; default: return pSchema->bytes; diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index 2e94ec8d0c3a79f7068580e95f30373aeff6ac5f..831b7017b2632a3e52e3050c08b2c29ffa463eeb 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -16,6 +16,7 @@ #include "commandInt.h" #include "plannodes.h" #include "query.h" 
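+// NOTE: tcommon.h is presumably needed here for the shared analyze-info structs (e.g. STableScanAnalyzeInfo, SSortExecInfo) consumed by the new EXPLAIN ANALYZE branches below.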
+#include "tcommon.h" int32_t qExplainGenerateResNode(SPhysiNode *pNode, SExplainGroup *group, SExplainResNode **pRes); int32_t qExplainAppendGroupResRows(void *pCtx, int32_t groupId, int32_t level); @@ -162,6 +163,16 @@ int32_t qExplainGenerateResChildren(SPhysiNode *pNode, SExplainGroup *group, SNo pPhysiChildren = pSessNode->window.node.pChildren; break; } + case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW: { + SStateWinodwPhysiNode* pStateNode = (SStateWinodwPhysiNode*) pNode; + pPhysiChildren = pStateNode->window.node.pChildren; + break; + } + case QUERY_NODE_PHYSICAL_PLAN_PARTITION: { + SPartitionPhysiNode* partitionPhysiNode = (SPartitionPhysiNode*) pNode; + pPhysiChildren = partitionPhysiNode->node.pChildren; + break; + } default: qError("not supported physical node type %d", pNode->type); QRY_ERR_RET(TSDB_CODE_QRY_APP_ERROR); @@ -339,7 +350,6 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pTagScanNode->pScanCols->length); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->node.pOutputDataBlockDesc->totalRowSize); - EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); @@ -381,6 +391,35 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); + // basic analyze output + if (EXPLAIN_MODE_ANALYZE == ctx->mode) { + EXPLAIN_ROW_NEW(level + 1, "I/O: "); + + int32_t nodeNum = taosArrayGetSize(pResNode->pExecInfo); + for (int32_t i = 0; i < nodeNum; ++i) { + SExplainExecInfo * execInfo = taosArrayGet(pResNode->pExecInfo, i); + STableScanAnalyzeInfo *pScanInfo = (STableScanAnalyzeInfo *)execInfo->verboseInfo; + + EXPLAIN_ROW_APPEND("total_blocks=%d", pScanInfo->totalBlocks); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + + EXPLAIN_ROW_APPEND("load_blocks=%d", pScanInfo->loadBlocks); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + + EXPLAIN_ROW_APPEND("load_block_SMAs=%d", pScanInfo->loadBlockStatis); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + + EXPLAIN_ROW_APPEND("total_rows=%" PRIu64, pScanInfo->totalRows); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + + EXPLAIN_ROW_APPEND("check_rows=%" PRIu64, pScanInfo->totalCheckedRows); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + } + if (verbose) { EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, @@ -390,8 +429,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - EXPLAIN_ROW_NEW(level + 1, EXPLAIN_TIMERANGE_FORMAT, pTblScanNode->scanRange.skey, - pTblScanNode->scanRange.ekey); + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_TIMERANGE_FORMAT, pTblScanNode->scanRange.skey, pTblScanNode->scanRange.ekey); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -522,8 +560,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); } - EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pAggNode->pAggFuncs->length); - EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + if (pAggNode->pAggFuncs) { + 
EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pAggNode->pAggFuncs->length); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pAggNode->node.pOutputDataBlockDesc->totalRowSize); if (pAggNode->pGroupKeys) { EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); @@ -600,13 +640,48 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); } - EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pSortNode->pSortKeys->length); + + SDataBlockDescNode* pDescNode = pSortNode->node.pOutputDataBlockDesc; + EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, nodesGetOutputNumFromSlotList(pDescNode->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); - EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pSortNode->node.pOutputDataBlockDesc->totalRowSize); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pDescNode->totalRowSize); EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); + if (EXPLAIN_MODE_ANALYZE == ctx->mode) { + // sort key + EXPLAIN_ROW_NEW(level, "Sort Key: "); + if (pResNode->pExecInfo) { + for (int32_t i = 0; i < LIST_LENGTH(pSortNode->pSortKeys); ++i) { + SOrderByExprNode *ptn = nodesListGetNode(pSortNode->pSortKeys, i); + EXPLAIN_ROW_APPEND("%s ", nodesGetNameFromColumnNode(ptn->pExpr)); + } + } + + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); + + // sort method + EXPLAIN_ROW_NEW(level, "Sort Method: "); + + int32_t nodeNum = taosArrayGetSize(pResNode->pExecInfo); + SExplainExecInfo *execInfo = taosArrayGet(pResNode->pExecInfo, 0); + SSortExecInfo * pExecInfo = (SSortExecInfo *)execInfo->verboseInfo; + EXPLAIN_ROW_APPEND("%s", pExecInfo->sortMethod == SORT_QSORT_T ? 
"quicksort" : "merge sort"); + if (pExecInfo->sortBuffer > 1024 * 1024) { + EXPLAIN_ROW_APPEND(" Buffers:%.2f Mb", pExecInfo->sortBuffer / (1024 * 1024.0)); + } else if (pExecInfo->sortBuffer > 1024) { + EXPLAIN_ROW_APPEND(" Buffers:%.2f Kb", pExecInfo->sortBuffer / (1024.0)); + } else { + EXPLAIN_ROW_APPEND(" Buffers:%d b", pExecInfo->sortBuffer); + } + + EXPLAIN_ROW_APPEND(" loops:%d", pExecInfo->loops); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); + } + if (verbose) { EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, @@ -637,6 +712,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pIntNode->window.pFuncs->length); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pIntNode->window.node.pOutputDataBlockDesc->totalRowSize); + EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); @@ -705,6 +781,80 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i } break; } + case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW: { + SStateWinodwPhysiNode *pStateNode = (SStateWinodwPhysiNode *)pNode; + + EXPLAIN_ROW_NEW(level, EXPLAIN_STATE_WINDOW_FORMAT, nodesGetNameFromColumnNode(((STargetNode*)pStateNode->pStateKey)->pExpr)); + EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); + if (pResNode->pExecInfo) { + QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + + EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pStateNode->window.pFuncs->length); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pStateNode->window.node.pOutputDataBlockDesc->totalRowSize); + EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); + + if (verbose) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, + nodesGetOutputNumFromSlotList(pStateNode->window.node.pOutputDataBlockDesc->pSlots)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pStateNode->window.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + + if (pStateNode->window.node.pConditions) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT); + QRY_ERR_RET(nodesNodeToSQL(pStateNode->window.node.pConditions, tbuf + VARSTR_HEADER_SIZE, + TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + } + } + break; + } + case QUERY_NODE_PHYSICAL_PLAN_PARTITION: { + SPartitionPhysiNode *pPartNode = (SPartitionPhysiNode *)pNode; + + SNode* p = nodesListGetNode(pPartNode->pPartitionKeys, 0); + EXPLAIN_ROW_NEW(level, EXPLAIN_PARITION_FORMAT, nodesGetNameFromColumnNode(p)); + EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); + if (pResNode->pExecInfo) { + QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pPartNode->node.pOutputDataBlockDesc->totalRowSize); + + EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, 
tbuf, tlen, level)); + + if (verbose) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, + nodesGetOutputNumFromSlotList(pPartNode->node.pOutputDataBlockDesc->pSlots)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pPartNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + + if (pPartNode->node.pConditions) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT); + QRY_ERR_RET(nodesNodeToSQL(pPartNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE, + TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + } + } + break; + } default: qError("not supported physical node type %d", pNode->type); return TSDB_CODE_QRY_APP_ERROR; diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h index 63c398618f38446260124978a803b2a63c6f0688..b8975854c9446eab43cd4a7d8c3ccb6e38b93016 100644 --- a/source/libs/executor/inc/executil.h +++ b/source/libs/executor/inc/executil.h @@ -75,15 +75,15 @@ typedef struct SResultRowInfo { int32_t size; // number of result set int32_t capacity; // max capacity SResultRowPosition cur; + SList* openWindow; } SResultRowInfo; struct SqlFunctionCtx; -size_t getResultRowSize(struct SqlFunctionCtx* pCtx, int32_t numOfOutput); +size_t getResultRowSize(struct SqlFunctionCtx* pCtx, int32_t numOfOutput); int32_t initResultRowInfo(SResultRowInfo* pResultRowInfo, int32_t size); void cleanupResultRowInfo(SResultRowInfo* pResultRowInfo); -int32_t numOfClosedResultRows(SResultRowInfo* pResultRowInfo); void closeAllResultRows(SResultRowInfo* pResultRowInfo); void initResultRow(SResultRow *pResultRow); @@ -92,15 +92,6 @@ bool isResultRowClosed(SResultRow* pResultRow); struct SResultRowEntryInfo* getResultCell(const SResultRow* pRow, int32_t index, const int32_t* offset); -static FORCE_INLINE SResultRow *getResultRow(SDiskbasedBuf* pBuf, SResultRowInfo *pResultRowInfo, int32_t slot) { - ASSERT(pResultRowInfo != NULL && slot >= 0 && slot < pResultRowInfo->size); - SResultRowPosition* pos = &pResultRowInfo->pPosition[slot]; - - SFilePage* bufPage = (SFilePage*) getBufPage(pBuf, pos->pageId); - SResultRow* pRow = (SResultRow*)((char*)bufPage + pos->offset); - return pRow; -} - static FORCE_INLINE SResultRow *getResultRowByPos(SDiskbasedBuf* pBuf, SResultRowPosition* pos) { SFilePage* bufPage = (SFilePage*) getBufPage(pBuf, pos->pageId); SResultRow* pRow = (SResultRow*)((char*)bufPage + pos->offset); diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 0dacbba8e590b010405571727775cdcda983629c..0b3e75f08169c433390378401556d603a78ae82f 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -49,7 +49,7 @@ typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int #define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0u) #define QUERY_IS_ASC_QUERY(q) (GET_FORWARD_DIRECTION_FACTOR((q)->order.order) == QUERY_ASC_FORWARD_STEP) -#define GET_TABLEGROUP(q, _index) ((SArray*)taosArrayGetP((q)->tableqinfoGroupInfo.pGroupList, (_index))) +//#define GET_TABLEGROUP(q, _index) ((SArray*)taosArrayGetP((q)->tableqinfoGroupInfo.pGroupList, (_index))) #define NEEDTO_COMPRESS_QUERY(size) ((size) > tsCompressColData ? 
1 : 0) @@ -86,49 +86,16 @@ typedef struct STableQueryInfo { // SVariant tag; } STableQueryInfo; -typedef enum { - QUERY_PROF_BEFORE_OPERATOR_EXEC = 0, - QUERY_PROF_AFTER_OPERATOR_EXEC, - QUERY_PROF_QUERY_ABORT -} EQueryProfEventType; - -typedef struct { - EQueryProfEventType eventType; - int64_t eventTime; - - union { - uint8_t operatorType; // for operator event - int32_t abortCode; // for query abort event - }; -} SQueryProfEvent; - -typedef struct { - uint8_t operatorType; - int64_t sumSelfTime; - int64_t sumRunTimes; -} SOperatorProfResult; - typedef struct SLimit { int64_t limit; int64_t offset; } SLimit; -typedef struct SFileBlockLoadRecorder { - uint64_t totalRows; - uint64_t totalCheckedRows; - uint32_t totalBlocks; - uint32_t loadBlocks; - uint32_t loadBlockStatis; - uint32_t skipBlocks; - uint32_t filterOutBlocks; - uint64_t elapsedTime; -} SFileBlockLoadRecorder; +typedef struct STableScanAnalyzeInfo SFileBlockLoadRecorder; typedef struct STaskCostInfo { - int64_t created; - int64_t start; - int64_t end; - + int64_t created; + int64_t start; uint64_t loadStatisTime; uint64_t loadFileBlockTime; uint64_t loadDataInCacheTime; @@ -152,8 +119,8 @@ typedef struct STaskCostInfo { } STaskCostInfo; typedef struct SOperatorCostInfo { - uint64_t openCost; - uint64_t totalCost; + double openCost; + double totalCost; } SOperatorCostInfo; // The basic query information extracted from the SQueryInfo tree to support the @@ -184,23 +151,21 @@ typedef struct STaskAttr { int32_t numOfFilterCols; int64_t* fillVal; void* tsdb; - STableGroupInfo tableGroupInfo; // table list SArray +// STableListInfo tableGroupInfo; // table list int32_t vgId; } STaskAttr; struct SOperatorInfo; -struct SAggSupporter; -struct SOptrBasicInfo; +//struct SAggSupporter; +//struct SOptrBasicInfo; -typedef void (*__optr_encode_fn_t)(struct SOperatorInfo* pOperator, struct SAggSupporter* pSup, - struct SOptrBasicInfo* pInfo, char** result, int32_t* length); -typedef bool (*__optr_decode_fn_t)(struct SOperatorInfo* pOperator, struct SAggSupporter* pSup, - struct SOptrBasicInfo* pInfo, char* result, int32_t length); +typedef int32_t (*__optr_encode_fn_t)(struct SOperatorInfo* pOperator, char** result, int32_t* length); +typedef int32_t (*__optr_decode_fn_t)(struct SOperatorInfo* pOperator, char* result); typedef int32_t (*__optr_open_fn_t)(struct SOperatorInfo* pOptr); typedef SSDataBlock* (*__optr_fn_t)(struct SOperatorInfo* pOptr); typedef void (*__optr_close_fn_t)(void* param, int32_t num); -typedef int32_t (*__optr_get_explain_fn_t)(struct SOperatorInfo* pOptr, void** pOptrExplain); +typedef int32_t (*__optr_explain_fn_t)(struct SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len); typedef struct STaskIdInfo { uint64_t queryId; // this is also a request id @@ -216,7 +181,7 @@ typedef struct SExecTaskInfo { STaskCostInfo cost; int64_t owner; // if it is in execution int32_t code; - uint64_t totalRows; // total number of rows +// uint64_t totalRows; // total number of rows struct { char *tablename; char *dbname; @@ -224,7 +189,7 @@ typedef struct SExecTaskInfo { int32_t tversion; } schemaVer; - STableGroupInfo tableqinfoGroupInfo; // this is a group array list, including SArray structure + STableListInfo tableqinfoList; // this is a table list char* sql; // query sql string jmp_buf env; // jump to this position when error happens. 
EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model] @@ -246,7 +211,7 @@ typedef struct STaskRuntimeEnv { STSCursor cur; char* tagVal; // tag value of current data block - STableGroupInfo tableqinfoGroupInfo; // this is a group array list, including SArray structure +// STableGroupInfo tableqinfoGroupInfo; // this is a table list struct SOperatorInfo* proot; SGroupResInfo groupResInfo; int64_t currentOffset; // dynamic offset value @@ -264,14 +229,14 @@ enum { }; typedef struct SOperatorFpSet { - __optr_open_fn_t _openFn; // DO NOT invoke this function directly - __optr_fn_t getNextFn; - __optr_fn_t getStreamResFn; // execute the aggregate in the stream model, todo remove it - __optr_fn_t cleanupFn; // call this function to release the allocated resources ASAP - __optr_close_fn_t closeFn; - __optr_encode_fn_t encodeResultRow; - __optr_decode_fn_t decodeResultRow; - __optr_get_explain_fn_t getExplainFn; + __optr_open_fn_t _openFn; // DO NOT invoke this function directly + __optr_fn_t getNextFn; + __optr_fn_t getStreamResFn; // execute the aggregate in the stream model, todo remove it + __optr_fn_t cleanupFn; // call this function to release the allocated resources ASAP + __optr_close_fn_t closeFn; + __optr_encode_fn_t encodeResultRow; + __optr_decode_fn_t decodeResultRow; + __optr_explain_fn_t getExplainFn; } SOperatorFpSet; typedef struct SOperatorInfo { @@ -367,6 +332,8 @@ typedef struct STableScanInfo { int32_t dataBlockLoadFlag; double sampleRatio; // data block sample ratio, 1 by default SInterval interval; // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if current data block needs to be loaded. + + int32_t curTWinIdx; } STableScanInfo; typedef struct STagScanInfo { @@ -375,7 +342,7 @@ typedef struct STagScanInfo { SArray *pColMatchInfo; int32_t curPos; SReadHandle readHandle; - STableGroupInfo *pTableGroups; + STableListInfo *pTableList; } STagScanInfo; typedef enum EStreamScanMode { @@ -392,40 +359,56 @@ typedef struct SCatchSupporter { int64_t* pKeyBuf; } SCatchSupporter; +typedef struct SStreamAggSupporter { + SArray* pResultRows; // SResultWindowInfo + int32_t keySize; + char* pKeyBuf; // window key buffer + SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file + int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row +} SStreamAggSupporter; + +typedef struct SessionWindowSupporter { + SStreamAggSupporter* pStreamAggSup; + int64_t gap; +} SessionWindowSupporter; + typedef struct SStreamBlockScanInfo { - SArray* pBlockLists; // multiple SSDatablock. - SSDataBlock* pRes; // result SSDataBlock - SSDataBlock* pUpdateRes; // update SSDataBlock - int32_t updateResIndex; - int32_t blockType; // current block type - int32_t validBlockIndex; // Is current data has returned? - SColumnInfo* pCols; // the output column info - uint64_t numOfRows; // total scanned rows - uint64_t numOfExec; // execution times - void* streamBlockReader;// stream block reader handle - SArray* pColMatchInfo; // - SNode* pCondition; - SArray* tsArray; - SUpdateInfo* pUpdateInfo; - int32_t primaryTsIndex; // primary time stamp slot id - void* pDataReader; - SReadHandle readHandle; - uint64_t tableUid; // queried super table uid + SArray* pBlockLists; // multiple SSDatablock. 
+ SSDataBlock* pRes; // result SSDataBlock + SSDataBlock* pUpdateRes; // update SSDataBlock + int32_t updateResIndex; + int32_t blockType; // current block type + int32_t validBlockIndex; // has the current data block been returned? + SColumnInfo* pCols; // the output column info + uint64_t numOfExec; // execution times + void* streamBlockReader; // stream block reader handle + SArray* pColMatchInfo; // + SNode* pCondition; + SArray* tsArray; + SUpdateInfo* pUpdateInfo; + + SExprInfo* pPseudoExpr; + int32_t numOfPseudoExpr; + + int32_t primaryTsIndex; // primary time stamp slot id + void* pDataReader; + SReadHandle readHandle; + uint64_t tableUid; // queried super table uid EStreamScanMode scanMode; SOperatorInfo* pOperatorDumy; SInterval interval; // if the upstream is an interval operator, the interval info is also kept here. - SCatchSupporter childAggSup; - SArray* childIds; + SArray* childIds; + SessionWindowSupporter sessionSup; + bool assignBlockUid; // assign block uid to groupId, temporarily used for generating rollup SMA. } SStreamBlockScanInfo; typedef struct SSysTableScanInfo { - SReadHandle readHandle; - SRetrieveMetaTableRsp* pRsp; SRetrieveTableReq req; SEpSet epSet; tsem_t ready; + SReadHandle readHandle; int32_t accountId; bool showRewrite; SNode* pCondition; // db_name filter condition, to discard data that are not in current database @@ -455,40 +438,49 @@ typedef struct SAggSupporter { typedef struct STimeWindowSupp { int8_t calTrigger; int64_t waterMark; + TSKEY maxTs; SColumnInfoData timeWindowData; // query time window info for scalar function execution. } STimeWindowAggSupp; typedef struct SIntervalAggOperatorInfo { + // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode SOptrBasicInfo binfo; // basic info + SAggSupporter aggSup; // aggregate supporter + SGroupResInfo groupResInfo; // multiple results build supporter SInterval interval; // interval info int32_t primaryTsIndex; // primary time stamp slot id from result of downstream operator. STimeWindow win; // query time range bool timeWindowInterpo; // interpolation needed or not char** pRow; // previous row/tuple of already processed datablock - SAggSupporter aggSup; // aggregate supporter + SArray* pInterpCols; // interpolation columns STableQueryInfo* pCurrent; // current tableQueryInfo struct int32_t order; // current SSDataBlock scan order EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model] SArray* pUpdatedWindow; // updated time window due to the input data block from the downstream operator. STimeWindowAggSupp twAggSup; - struct SFillInfo* pFillInfo; // fill info bool invertible; + SArray* pPrevValues; // SArray used to keep the previous not null value for interpolation. } SIntervalAggOperatorInfo; typedef struct SStreamFinalIntervalOperatorInfo { + // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode SOptrBasicInfo binfo; // basic info + SAggSupporter aggSup; // aggregate supporter + SGroupResInfo groupResInfo; // multiple results build supporter SInterval interval; // interval info int32_t primaryTsIndex; // primary time stamp slot id from result of downstream operator. 
- SAggSupporter aggSup; // aggregate supporter int32_t order; // current SSDataBlock scan order STimeWindowAggSupp twAggSup; + SArray* pChildren; } SStreamFinalIntervalOperatorInfo; typedef struct SAggOperatorInfo { + // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode SOptrBasicInfo binfo; SAggSupporter aggSup; + STableQueryInfo *current; uint64_t groupId; SGroupResInfo groupResInfo; @@ -501,8 +493,10 @@ typedef struct SAggOperatorInfo { } SAggOperatorInfo; typedef struct SProjectOperatorInfo { + // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode SOptrBasicInfo binfo; SAggSupporter aggSup; + SSDataBlock* existDataBlock; SArray* pPseudoColInfo; SLimit limit; @@ -526,7 +520,10 @@ typedef struct SFillOperatorInfo { } SFillOperatorInfo; typedef struct SGroupbyOperatorInfo { + // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode SOptrBasicInfo binfo; + SAggSupporter aggSup; + SArray* pGroupCols; // group by columns, SArray SArray* pGroupColVals; // current group column values, SArray SNode* pCondition; @@ -534,7 +531,6 @@ typedef struct SGroupbyOperatorInfo { char* keyBuf; // group by keys for hash int32_t groupKeyLen; // total group by column width SGroupResInfo groupResInfo; - SAggSupporter aggSup; SExprInfo* pScalarExprInfo; int32_t numOfScalarExpr; // the number of scalar expression in group operator SqlFunctionCtx* pScalarFuncCtx; @@ -571,8 +567,10 @@ typedef struct SWindowRowsSup { } SWindowRowsSup; typedef struct SSessionAggOperatorInfo { + // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode SOptrBasicInfo binfo; SAggSupporter aggSup; + SGroupResInfo groupResInfo; SWindowRowsSup winSup; bool reptScan; // next round scan @@ -581,6 +579,29 @@ typedef struct SSessionAggOperatorInfo { STimeWindowAggSupp twAggSup; } SSessionAggOperatorInfo; +typedef struct SResultWindowInfo { + SResultRowPosition pos; + STimeWindow win; + bool isOutput; + bool isClosed; +} SResultWindowInfo; + +typedef struct SStreamSessionAggOperatorInfo { + SOptrBasicInfo binfo; + SStreamAggSupporter streamAggSup; + SGroupResInfo groupResInfo; + int64_t gap; // session window gap + int32_t primaryTsIndex; // primary timestamp slot id + int32_t order; // current SSDataBlock scan order + STimeWindowAggSupp twAggSup; + SSDataBlock* pWinBlock; // window result + SqlFunctionCtx* pDummyCtx; // for combine + SSDataBlock* pDelRes; + SHashObj* pStDeleted; + void* pDelIterator; + SArray* pChildren; // cache for children's result; +} SStreamSessionAggOperatorInfo; + typedef struct STimeSliceOperatorInfo { SOptrBasicInfo binfo; SInterval interval; @@ -588,8 +609,10 @@ typedef struct STimeSliceOperatorInfo { } STimeSliceOperatorInfo; typedef struct SStateWindowOperatorInfo { + // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode SOptrBasicInfo binfo; SAggSupporter aggSup; + SGroupResInfo groupResInfo; SWindowRowsSup winSup; SColumn stateCol; // start row index @@ -601,8 +624,10 @@ typedef struct SStateWindowOperatorInfo { } SStateWindowOperatorInfo; typedef struct SSortedMergeOperatorInfo { - + // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode SOptrBasicInfo binfo; + SAggSupporter aggSup; + SArray* pSortInfo; int32_t numOfSources; SSortHandle *pSortHandle; @@ -614,23 +639,18 @@ typedef struct SSortedMergeOperatorInfo { int32_t numOfResPerPage; char** groupVal; SArray *groupInfo; - SAggSupporter aggSup; } SSortedMergeOperatorInfo; typedef struct 
SSortOperatorInfo { SOptrBasicInfo binfo; - uint32_t sortBufSize; // max buffer size for in-memory sort + uint32_t sortBufSize; // max buffer size for in-memory sort SArray* pSortInfo; SSortHandle* pSortHandle; SArray* pColMatchInfo; // for index map from table scan output int32_t bufPageSize; - // TODO extact struct - int64_t startTs; // sort start time - uint64_t sortElapsed; // sort elapsed time, time to flush to disk not included. - uint64_t totalSize; // total load bytes from remote - uint64_t totalRows; // total number of rows - uint64_t totalElapsed; // total elapsed time + int64_t startTs; // sort start time + uint64_t sortElapsed; // sort elapsed time, time to flush to disk not included. } SSortOperatorInfo; typedef struct STagFilterOperatorInfo { @@ -656,7 +676,7 @@ typedef struct SJoinOperatorInfo { SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn, __optr_fn_t streamFn, __optr_fn_t cleanup, __optr_close_fn_t closeFn, __optr_encode_fn_t encode, - __optr_decode_fn_t decode, __optr_get_explain_fn_t explain); + __optr_decode_fn_t decode, __optr_explain_fn_t explain); int32_t operatorDummyOpenFn(SOperatorInfo* pOperator); void operatorDummyCloseFn(void* param, int32_t numOfCols); @@ -676,6 +696,7 @@ int32_t setSDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadI SArray* pColList); void getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key, STimeWindow* win); int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t *order, int32_t* scanFlag); +int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz); void doSetOperatorCompleted(SOperatorInfo* pOperator); void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock, SArray* pColMatchInfo); @@ -708,7 +729,7 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, tsdbReaderT pDataReader, SReadHandle* pHandle, SExecTaskInfo* pTaskInfo); SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock, SExprInfo* pScalarExprInfo, - int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo, const STableGroupInfo* pTableGroupInfo); + int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo); SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t num, SSDataBlock* pResBlock, SLimit* pLimit, SLimit* pSlimit, SExecTaskInfo* pTaskInfo); SOperatorInfo *createSortOperatorInfo(SOperatorInfo* downstream, SSDataBlock* pResBlock, SArray* pSortInfo, SExprInfo* pExprInfo, int32_t numOfCols, @@ -721,26 +742,25 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* pSysTableReadHandle, SSDataB SExecTaskInfo* pTaskInfo, bool showRewrite, int32_t accountId); SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, - STimeWindowAggSupp *pTwAggSupp, const STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo); + STimeWindowAggSupp *pTwAggSupp, SExecTaskInfo* pTaskInfo); SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, - STimeWindowAggSupp *pTwAggSupp, const STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo); + STimeWindowAggSupp *pTwAggSupp, SExecTaskInfo* pTaskInfo); 
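/*
 * Editor's note (illustrative sketch, not part of the patch): several operator
 * structs above now repeat the comment "SOptrBasicInfo should be first,
 * SAggSupporter should be second for stream encode". The point is that a single
 * generic encode/decode routine can then reach both members through a common
 * struct prefix, regardless of the concrete operator type. Assumes
 * executorimpl.h is in scope; SCommonAggPrefix and getAggSupOfOperator are
 * hypothetical names introduced here.
 */
#include "executorimpl.h"

typedef struct SCommonAggPrefix {
  SOptrBasicInfo binfo;   // must remain the first field
  SAggSupporter  aggSup;  // must remain the second field
} SCommonAggPrefix;

static SAggSupporter* getAggSupOfOperator(struct SOperatorInfo* pOperator) {
  // Valid only while every stream-encodable operator keeps the layout above.
  return &((SCommonAggPrefix*)pOperator->info)->aggSup;
}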
SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, - SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, - STimeWindowAggSupp *pTwAggSupp, const STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo); + STimeWindowAggSupp *pTwAggSupp, SExecTaskInfo* pTaskInfo); SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, int64_t gap, int32_t tsSlotId, STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo); SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock, SArray* pGroupColList, SNode* pCondition, - SExprInfo* pScalarExprInfo, int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo, - const STableGroupInfo* pTableGroupInfo); + SExprInfo* pScalarExprInfo, int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo); SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SExecTaskInfo* pTaskInfo); -SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataReader, SReadHandle* pHandle, - uint64_t uid, SSDataBlock* pResBlock, SArray* pColList, - SArray* pTableIdList, SExecTaskInfo* pTaskInfo, SNode* pCondition, - SOperatorInfo* pOperatorDumy); + +SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle, + SArray* pTableIdList, STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo, + STimeWindowAggSupp* pTwSup, int16_t tsColId); + SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols, SInterval* pInterval, STimeWindow* pWindow, SSDataBlock* pResBlock, int32_t fillType, SNodeListNode* fillVal, @@ -749,15 +769,17 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf SSDataBlock* pResBlock, STimeWindowAggSupp *pTwAggSupp, int32_t tsSlotId, SColumn* pStateKeyCol, SExecTaskInfo* pTaskInfo); SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, - SSDataBlock* pResultBlock, SArray* pGroupColList, SExecTaskInfo* pTaskInfo, - const STableGroupInfo* pTableGroupInfo); + SSDataBlock* pResultBlock, SArray* pGroupColList, SExecTaskInfo* pTaskInfo); SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock, SExecTaskInfo* pTaskInfo); SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SNode* pOnCondition, SExecTaskInfo* pTaskInfo); -SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, SExprInfo* pExpr, int32_t numOfOutput, SSDataBlock* pResBlock, SArray* pColMatchInfo, STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo); +SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, SExprInfo* pExpr, int32_t numOfOutput, SSDataBlock* pResBlock, SArray* pColMatchInfo, STableListInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo); +SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, + SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, int64_t gap, + int32_t tsSlotId, STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo); #if 0 SOperatorInfo* createTableSeqScanOperatorInfo(void* pTsdbReadHandle, STaskRuntimeEnv* pRuntimeEnv); #endif @@ -769,40 +791,55 @@ void setInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlo 
void copyTsColoum(SSDataBlock* pRes, SqlFunctionCtx* pCtx, int32_t numOfOutput); -STableQueryInfo* createTableQueryInfo(void* buf, STimeWindow win); - bool isTaskKilled(SExecTaskInfo* pTaskInfo); int32_t checkForQueryBuf(size_t numOfTables); void setTaskKilled(SExecTaskInfo* pTaskInfo); - -void publishOperatorProfEvent(SOperatorInfo* operatorInfo, EQueryProfEventType eventType); -void publishQueryAbortEvent(SExecTaskInfo* pTaskInfo, int32_t code); - void queryCostStatis(SExecTaskInfo* pTaskInfo); void doDestroyTask(SExecTaskInfo* pTaskInfo); int32_t getMaximumIdleDurationSec(); +/* + * ops: root operator of the tree to encode + * data: *data stores the encoded result; it must be freed by the caller + * length: *length stores the length of *data + * return: result code, 0 means success + */ +int32_t encodeOperator(SOperatorInfo* ops, char** data, int32_t *length); + +/* + * ops: root operator, created by caller + * data: the encoded data to decode + * length: the length of data + * return: result code, 0 means success + */ +int32_t decodeOperator(SOperatorInfo* ops, char* data, int32_t length); + void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status); int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId, EOPTR_EXEC_MODEL model); int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo** pRes, int32_t* capacity, int32_t* resNum); -bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char* result, - int32_t length); -void aggEncodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char** result, - int32_t* length); +int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result); +int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* length); + STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowInfo, int64_t ts, SInterval* pInterval, int32_t precision, STimeWindow* win); -int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimaryColumn, int32_t startPos, - TSKEY ekey, __block_search_fn_t searchFn, STableQueryInfo* item, - int32_t order); +int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimaryColumn, + int32_t startPos, TSKEY ekey, __block_search_fn_t searchFn, STableQueryInfo* item, + int32_t order); int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order); -int32_t initCatchSupporter(SCatchSupporter* pCatchSup, size_t rowSize, size_t keyBufSize, - const char* pKey, const char* pDir); - +int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey); +SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize); +SResultWindowInfo* getSessionTimeWindow(SArray* pWinInfos, TSKEY ts, int64_t gap, + int32_t* pIndex); +int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pTs, int32_t rows, + int32_t start, int64_t gap, SHashObj* pStDeleted); +bool functionNeedToExecute(SqlFunctionCtx* pCtx); + +int32_t compareTimeWindow(const void* p1, const void* p2, const void* param); #ifdef __cplusplus } #endif diff --git a/source/libs/executor/inc/tsort.h b/source/libs/executor/inc/tsort.h index d74628a72fb4723d1837a0547574da414253bef6..c8b1b3ee513bc508de5187c8d39ace4ae5e4b7f8 100644 --- a/source/libs/executor/inc/tsort.h +++ b/source/libs/executor/inc/tsort.h @@ -137,6 +137,14 @@ void* tsortGetValue(STupleHandle* pVHandle, int32_t colId); */ SSDataBlock* tsortGetSortedDataBlock(const SSortHandle* 
pSortHandle); +/** + * return the sort execution information. + * + * @param pHandle + * @return + */ +SSortExecInfo tsortGetSortExecInfo(SSortHandle* pHandle); + #ifdef __cplusplus } #endif diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 5a02547f58aa4cf73c5297dda771ba0900bce141..1c45e38b632d29340472c1955d2b097377478ce0 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -101,20 +101,8 @@ void resetResultRowInfo(STaskRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRow pResultRowInfo->size = 0; } -int32_t numOfClosedResultRows(SResultRowInfo *pResultRowInfo) { - int32_t i = 0; -// while (i < pResultRowInfo->size && pResultRowInfo->pResult[i]->closed) { -// ++i; -// } - - return i; -} - void closeAllResultRows(SResultRowInfo *pResultRowInfo) { - assert(pResultRowInfo->size >= 0 && pResultRowInfo->capacity >= pResultRowInfo->size); - - for (int32_t i = 0; i < pResultRowInfo->size; ++i) { - } +// do nothing } bool isResultRowClosed(SResultRow* pRow) { @@ -233,7 +221,7 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList) { if (pGroupResInfo->pRows != NULL) { - taosArrayDestroy(pGroupResInfo->pRows); + taosArrayDestroyP(pGroupResInfo->pRows, taosMemoryFree); } pGroupResInfo->pRows = pArrayList; @@ -258,32 +246,6 @@ int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo) { return (int32_t) taosArrayGetSize(pGroupResInfo->pRows); } -static int64_t getNumOfResultWindowRes(STaskRuntimeEnv* pRuntimeEnv, SResultRowPosition *pos, int32_t* rowCellInfoOffset) { - STaskAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; - ASSERT(0); - - for (int32_t j = 0; j < pQueryAttr->numOfOutput; ++j) { - int32_t functionId = 0;//pQueryAttr->pExpr1[j].base.functionId; - - /* - * ts, tag, tagprj function can not decide the output number of current query - * the number of output result is decided by main output - */ - if (functionId == FUNCTION_TS || functionId == FUNCTION_TAG || functionId == FUNCTION_TAGPRJ) { - continue; - } - -// SResultRowEntryInfo *pResultInfo = getResultCell(pResultRow, j, rowCellInfoOffset); -// assert(pResultInfo != NULL); -// -// if (pResultInfo->numOfRes > 0) { -// return pResultInfo->numOfRes; -// } - } - - return 0; -} - static int32_t tableResultComparFn(const void *pLeft, const void *pRight, void *param) { int32_t left = *(int32_t *)pLeft; int32_t right = *(int32_t *)pRight; @@ -381,7 +343,7 @@ static int32_t mergeIntoGroupResultImplRv(STaskRuntimeEnv *pRuntimeEnv, SGroupRe } - int64_t num = getNumOfResultWindowRes(pRuntimeEnv, &pResultRowCell->pos, rowCellInfoOffset); + int64_t num = 0;//getNumOfResultWindowRes(pRuntimeEnv, &pResultRowCell->pos, rowCellInfoOffset); if (num <= 0) { continue; } diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 2811c8dce84918bc61339597150b15f56690b99d..fd62849e56805c22472a5ea438140ec655e20df0 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -19,7 +19,7 @@ #include "tdatablock.h" #include "vnode.h" -static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t numOfBlocks, int32_t type, char* id) { +static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t numOfBlocks, int32_t type, bool assignUid, char* id) { ASSERT(pOperator != NULL); if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { if (pOperator->numOfDownstream == 0) { 
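/*
 * Editor's note (hedged usage sketch, not part of the patch): how the new
 * encodeOperator/decodeOperator pair declared in executorimpl.h above is meant
 * to be driven, following its header comment -- the encoder allocates *data and
 * the caller must free it. checkpointOperatorTree is a hypothetical helper
 * name; error handling is trimmed for brevity.
 */
#include "executorimpl.h"

static int32_t checkpointOperatorTree(SOperatorInfo* pRoot) {
  char*   data = NULL;
  int32_t length = 0;

  int32_t code = encodeOperator(pRoot, &data, &length);  // serialize the operator tree state
  if (code != TSDB_CODE_SUCCESS) {
    return code;
  }

  code = decodeOperator(pRoot, data, length);  // restore the state into an equivalent tree
  taosMemoryFree(data);                        // ownership rule from the header comment
  return code;
}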
@@ -32,11 +32,12 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu return TSDB_CODE_QRY_APP_ERROR; } pOperator->status = OP_NOT_OPENED; - return doSetStreamBlock(pOperator->pDownstream[0], input, numOfBlocks, type, id); + return doSetStreamBlock(pOperator->pDownstream[0], input, numOfBlocks, type, assignUid, id); } else { pOperator->status = OP_NOT_OPENED; SStreamBlockScanInfo* pInfo = pOperator->info; + pInfo->assignBlockUid = assignUid; // the block type can not be changed in the streamscan operators if (pInfo->blockType == 0) { @@ -67,11 +68,11 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu } } -int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type) { - return qSetMultiStreamInput(tinfo, input, 1, type); +int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type, bool assignUid) { + return qSetMultiStreamInput(tinfo, input, 1, type, assignUid); } -int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type) { +int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type, bool assignUid) { if (tinfo == NULL) { return TSDB_CODE_QRY_APP_ERROR; } @@ -82,7 +83,7 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; - int32_t code = doSetStreamBlock(pTaskInfo->pRoot, (void**)pBlocks, numOfBlocks, type, GET_TASKID(pTaskInfo)); + int32_t code = doSetStreamBlock(pTaskInfo->pRoot, (void**)pBlocks, numOfBlocks, type, assignUid, GET_TASKID(pTaskInfo)); if (code != TSDB_CODE_SUCCESS) { qError("%s failed to set the stream block data", GET_TASKID(pTaskInfo)); } else { diff --git a/source/libs/executor/src/executorMain.c b/source/libs/executor/src/executorMain.c index d4d8696abaa1906969077ed8829dff9113680b05..7757825733153741d6e83404051578f7f4e2aef8 100644 --- a/source/libs/executor/src/executorMain.c +++ b/source/libs/executor/src/executorMain.c @@ -30,13 +30,6 @@ #include "tlosertree.h" #include "ttypes.h" -typedef struct STaskMgmt { - TdThreadMutex lock; - SCacheObj *qinfoPool; // query handle pool - int32_t vgId; - bool closed; -} STaskMgmt; - int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId, SSubplan* pSubplan, qTaskInfo_t* pTaskInfo, DataSinkHandle* handle, EOPTR_EXEC_MODEL model) { assert(readHandle != NULL && pSubplan != NULL); @@ -131,36 +124,30 @@ int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t *useconds) { // error occurs, record the error code and return to client int32_t ret = setjmp(pTaskInfo->env); if (ret != TSDB_CODE_SUCCESS) { - publishQueryAbortEvent(pTaskInfo, ret); pTaskInfo->code = ret; cleanUpUdfs(); - qDebug("%s task abort due to error/cancel occurs, code:%s", GET_TASKID(pTaskInfo), - tstrerror(pTaskInfo->code)); + qDebug("%s task abort due to error/cancel occurs, code:%s", GET_TASKID(pTaskInfo), tstrerror(pTaskInfo->code)); return pTaskInfo->code; } qDebug("%s execTask is launched", GET_TASKID(pTaskInfo)); - publishOperatorProfEvent(pTaskInfo->pRoot, QUERY_PROF_BEFORE_OPERATOR_EXEC); - int64_t st = taosGetTimestampUs(); *pRes = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot); uint64_t el = (taosGetTimestampUs() - st); pTaskInfo->cost.elapsedTime += el; - - publishOperatorProfEvent(pTaskInfo->pRoot, QUERY_PROF_AFTER_OPERATOR_EXEC); - if (NULL == *pRes) { *useconds = pTaskInfo->cost.elapsedTime; } + cleanUpUdfs(); + int32_t current = (*pRes != NULL)? 
(*pRes)->info.rows:0; - pTaskInfo->totalRows += current; + uint64_t total = pTaskInfo->pRoot->resultInfo.totalRows; - cleanUpUdfs(); qDebug("%s task suspended, %d rows returned, total:%" PRId64 " rows, in sinkNode:%d, elapsed:%.2f ms", - GET_TASKID(pTaskInfo), current, pTaskInfo->totalRows, 0, el/1000.0); + GET_TASKID(pTaskInfo), current, total, 0, el/1000.0); atomic_store_64(&pTaskInfo->owner, 0); return pTaskInfo->code; @@ -210,7 +197,7 @@ int32_t qIsTaskCompleted(qTaskInfo_t qinfo) { void qDestroyTask(qTaskInfo_t qTaskHandle) { SExecTaskInfo* pTaskInfo = (SExecTaskInfo*) qTaskHandle; - qDebug("%s execTask completed, numOfRows:%"PRId64, GET_TASKID(pTaskInfo), pTaskInfo->totalRows); + qDebug("%s execTask completed, numOfRows:%"PRId64, GET_TASKID(pTaskInfo), pTaskInfo->pRoot->resultInfo.totalRows); queryCostStatis(pTaskInfo); // print the query cost summary doDestroyTask(pTaskInfo); diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 4475cb9e62c1c60800425c95224b399c9051059c..a40d6ea2e7b855e373af4b9789630895285a33ef 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -13,7 +13,6 @@ * along with this program. If not, see . */ -#include #include "filter.h" #include "function.h" #include "functionMgt.h" @@ -29,6 +28,7 @@ #include "ttime.h" #include "executorimpl.h" +#include "index.h" #include "query.h" #include "tcompare.h" #include "tcompression.h" @@ -87,8 +87,8 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) { #define realloc u_realloc #endif -#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st))) -#define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList) +#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st))) +//#define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList) #define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0) int32_t getMaximumIdleDurationSec() { return tsShellActivityTimer * 2; } @@ -99,7 +99,6 @@ static int32_t getExprFunctionId(SExprInfo* pExprInfo) { } static void doSetTagValueToResultBuf(char* output, const char* val, int16_t type, int16_t bytes); -static bool functionNeedToExecute(SqlFunctionCtx* pCtx); static void setBlockStatisInfo(SqlFunctionCtx* pCtx, SExprInfo* pExpr, SSDataBlock* pSDataBlock); @@ -107,7 +106,7 @@ static void destroyTableQueryInfoImpl(STableQueryInfo* pTableQueryInfo); static SColumnInfo* extractColumnFilterInfo(SExprInfo* pExpr, int32_t numOfOutput, int32_t* numOfFilterCols); -static void releaseQueryBuf(size_t numOfTables); +static void releaseQueryBuf(size_t numOfTables); static int32_t getNumOfScanTimes(STaskAttr* pQueryAttr); @@ -125,6 +124,8 @@ static void destroySysTableScannerOperatorInfo(void* param, int32_t numOfOutput) void doSetOperatorCompleted(SOperatorInfo* pOperator) { pOperator->status = OP_EXEC_DONE; + + pOperator->cost.totalCost = (taosGetTimestampUs() - pOperator->pTaskInfo->cost.start * 1000) / 1000.0; if (pOperator->pTaskInfo != NULL) { setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED); } @@ -138,7 +139,7 @@ int32_t operatorDummyOpenFn(SOperatorInfo* pOperator) { SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn, __optr_fn_t streamFn, __optr_fn_t cleanup, __optr_close_fn_t closeFn, __optr_encode_fn_t encode, - __optr_decode_fn_t decode, __optr_get_explain_fn_t explain) { + __optr_decode_fn_t decode, __optr_explain_fn_t explain) { SOperatorFpSet fpSet = { ._openFn = openFn, .getNextFn = nextFn, @@ -155,8 
+156,9 @@ SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn, void operatorDummyCloseFn(void* param, int32_t numOfCols) {} -static int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo, - const int32_t* rowCellOffset, SqlFunctionCtx* pCtx, int32_t numOfExprs); +static int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprInfo* pExprInfo, + SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo, const int32_t* rowCellOffset, + SqlFunctionCtx* pCtx, int32_t numOfExprs); static void initCtxOutputBuffer(SqlFunctionCtx* pCtx, int32_t size); static void setResultBufSize(STaskAttr* pQueryAttr, SResultInfo* pResultInfo); @@ -183,10 +185,10 @@ static int compareRowData(const void* a, const void* b, const void* userData) { int16_t offset = supporter->dataOffset; return 0; -// char* in1 = getPosInResultPage(pRuntimeEnv->pQueryAttr, page1, pRow1->offset, offset); -// char* in2 = getPosInResultPage(pRuntimeEnv->pQueryAttr, page2, pRow2->offset, offset); + // char* in1 = getPosInResultPage(pRuntimeEnv->pQueryAttr, page1, pRow1->offset, offset); + // char* in2 = getPosInResultPage(pRuntimeEnv->pQueryAttr, page2, pRow2->offset, offset); -// return (in1 != NULL && in2 != NULL) ? supporter->comFunc(in1, in2) : 0; + // return (in1 != NULL && in2 != NULL) ? supporter->comFunc(in1, in2) : 0; } // setup the output buffer for each operator @@ -237,36 +239,6 @@ static bool hasNull(SColumn* pColumn, SColumnDataAgg* pStatis) { return true; } -static void prepareResultListBuffer(SResultRowInfo* pResultRowInfo, jmp_buf env) { - int64_t newCapacity = 0; - - // more than the capacity, reallocate the resources - if (pResultRowInfo->size < pResultRowInfo->capacity) { - return; - } - - if (pResultRowInfo->capacity > 10000) { - newCapacity = (int64_t)(pResultRowInfo->capacity * 1.25); - } else { - newCapacity = (int64_t)(pResultRowInfo->capacity * 1.5); - } - - if (newCapacity <= pResultRowInfo->capacity) { - newCapacity += 4; - } - - char* p = taosMemoryRealloc(pResultRowInfo->pPosition, newCapacity * sizeof(SResultRowPosition)); - if (p == NULL) { - longjmp(env, TSDB_CODE_OUT_OF_MEMORY); - } - - pResultRowInfo->pPosition = (SResultRowPosition*)p; - - int32_t inc = (int32_t)newCapacity - pResultRowInfo->capacity; - memset(&pResultRowInfo->pPosition[pResultRowInfo->capacity], 0, sizeof(SResultRowPosition) * inc); - pResultRowInfo->capacity = (int32_t)newCapacity; -} - static bool chkResultRowFromKey(STaskRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo, char* pData, int16_t bytes, bool masterscan, uint64_t uid) { bool existed = false; @@ -304,7 +276,7 @@ static bool chkResultRowFromKey(STaskRuntimeEnv* pRuntimeEnv, SResultRowInfo* pR return p1 != NULL; } -SResultRow* getNewResultRow_rv(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize) { +SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize) { SFilePage* pData = NULL; // in the first scan, new space needed for results @@ -373,6 +345,8 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR // In case of group by column query, the required SResultRow object must be existInCurrentResusltRowInfo in the // pResultRowInfo object. 
if (p1 != NULL) { + + // todo pResult = getResultRowByPos(pResultBuf, p1); ASSERT(pResult->pageId == p1->pageId && pResult->offset == p1->offset); } @@ -381,34 +355,28 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR // 1. close current opened time window if (pResultRowInfo->cur.pageId != -1 && ((pResult == NULL) || (pResult->pageId != pResultRowInfo->cur.pageId && pResult->offset != pResultRowInfo->cur.offset))) { - // todo extract function SResultRowPosition pos = pResultRowInfo->cur; - SFilePage* pPage = getBufPage(pResultBuf, pos.pageId); - SResultRow* pRow = (SResultRow*)((char*)pPage + pos.offset); - closeResultRow(pRow); + SFilePage* pPage = getBufPage(pResultBuf, pos.pageId); releaseBufPage(pResultBuf, pPage); } // allocate a new buffer page - prepareResultListBuffer(pResultRowInfo, pTaskInfo->env); if (pResult == NULL) { ASSERT(pSup->resultRowSize > 0); - pResult = getNewResultRow_rv(pResultBuf, groupId, pSup->resultRowSize); + pResult = getNewResultRow(pResultBuf, groupId, pSup->resultRowSize); + initResultRow(pResult); // add a new result set for a new group SResultRowPosition pos = {.pageId = pResult->pageId, .offset = pResult->offset}; - taosHashPut(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pos, - sizeof(SResultRowPosition)); + taosHashPut(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pos, sizeof(SResultRowPosition)); } // 2. set the new time window to be the new active time window - pResultRowInfo->pPosition[pResultRowInfo->size++] = - (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset}; pResultRowInfo->cur = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset}; // too many time window in query - if (pResultRowInfo->size > MAX_INTERVAL_TIME_WINDOW) { + if (taosHashGetSize(pSup->pResultRowHashTable) > MAX_INTERVAL_TIME_WINDOW) { longjmp(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW); } @@ -583,10 +551,13 @@ void initExecTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pQueryWindow colDataAppendInt64(pColData, 4, &pQueryWindow->ekey); } -void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWindow* pWin, SColumnInfoData* pTimeWindowData, int32_t offset, - int32_t forwardStep, TSKEY* tsCol, int32_t numOfTotal, int32_t numOfOutput, int32_t order) { + +void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWindow* pWin, + SColumnInfoData* pTimeWindowData, int32_t offset, int32_t forwardStep, TSKEY* tsCol, + int32_t numOfTotal, int32_t numOfOutput, int32_t order) { for (int32_t k = 0; k < numOfOutput; ++k) { // keep it temporarily + // todo no need this?? 
bool hasAgg = pCtx[k].input.colDataAggIsSet; int32_t numOfRows = pCtx[k].input.numOfRows; int32_t startOffset = pCtx[k].input.startRowIndex; @@ -606,7 +577,8 @@ void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWindow if (fmIsWindowPseudoColumnFunc(pCtx[k].functionId)) { SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(&pCtx[k]); - char* p = GET_ROWCELL_INTERBUF(pEntryInfo); + + char* p = GET_ROWCELL_INTERBUF(pEntryInfo); SColumnInfoData idata = {0}; idata.info.type = TSDB_DATA_TYPE_BIGINT; @@ -617,22 +589,23 @@ void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWindow SScalarParam tw = {.numOfRows = 5, .columnData = pTimeWindowData}; pCtx[k].sfp.process(&tw, 1, &out); pEntryInfo->numOfRes = 1; - continue; - } - int32_t code = TSDB_CODE_SUCCESS; - if (functionNeedToExecute(&pCtx[k]) && pCtx[k].fpSet.process != NULL) { - code = pCtx[k].fpSet.process(&pCtx[k]); - if (code != TSDB_CODE_SUCCESS) { - qError("%s apply functions error, code: %s", GET_TASKID(taskInfo), tstrerror(code)); - taskInfo->code = code; - longjmp(taskInfo->env, code); + } else { + int32_t code = TSDB_CODE_SUCCESS; + if (functionNeedToExecute(&pCtx[k]) && pCtx[k].fpSet.process != NULL) { + code = pCtx[k].fpSet.process(&pCtx[k]); + + if (code != TSDB_CODE_SUCCESS) { + qError("%s apply functions error, code: %s", GET_TASKID(taskInfo), tstrerror(code)); + taskInfo->code = code; + longjmp(taskInfo->env, code); + } } - } - // restore it - pCtx[k].input.colDataAggIsSet = hasAgg; - pCtx[k].input.startRowIndex = startOffset; - pCtx[k].input.numOfRows = numOfRows; + // restore it + pCtx[k].input.colDataAggIsSet = hasAgg; + pCtx[k].input.startRowIndex = startOffset; + pCtx[k].input.numOfRows = numOfRows; + } } } @@ -666,8 +639,8 @@ static void doSetInputDataBlockInfo(SOperatorInfo* pOperator, SqlFunctionCtx* pC } } -void setInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order, int32_t scanFlag, - bool createDummyCol) { +void setInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order, + int32_t scanFlag, bool createDummyCol) { if (pBlock->pBlockAgg != NULL) { doSetInputDataBlockInfo(pOperator, pCtx, pBlock, order); } else { @@ -718,7 +691,7 @@ static int32_t doCreateConstantValColumnInfo(SInputColumnInfoData* pInput, SFunc } static int32_t doSetInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order, - int32_t scanFlag, bool createDummyCol) { + int32_t scanFlag, bool createDummyCol) { int32_t code = TSDB_CODE_SUCCESS; for (int32_t i = 0; i < pOperator->numOfExprs; ++i) { @@ -726,7 +699,7 @@ static int32_t doSetInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCt pCtx[i].input.numOfRows = pBlock->info.rows; pCtx[i].pSrcBlock = pBlock; - pCtx[i].scanFlag = scanFlag; + pCtx[i].scanFlag = scanFlag; SInputColumnInfoData* pInput = &pCtx[i].input; pInput->uid = pBlock->info.uid; @@ -771,12 +744,14 @@ static int32_t doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SqlFunct for (int32_t k = 0; k < pOperator->numOfExprs; ++k) { if (functionNeedToExecute(&pCtx[k])) { // todo add a dummy funtion to avoid process check - if (pCtx[k].fpSet.process != NULL) { - int32_t code = pCtx[k].fpSet.process(&pCtx[k]); - if (code != TSDB_CODE_SUCCESS) { - qError("%s aggregate function error happens, code: %s", GET_TASKID(pOperator->pTaskInfo), tstrerror(code)); - return code; - } + if (pCtx[k].fpSet.process == NULL) { + continue; + } + + int32_t code = 
pCtx[k].fpSet.process(&pCtx[k]); + if (code != TSDB_CODE_SUCCESS) { + qError("%s aggregate function error happens, code: %s", GET_TASKID(pOperator->pTaskInfo), tstrerror(code)); + return code; } } } @@ -835,7 +810,7 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc SColumnInfoData idata = {.info = pResColData->info, .hasNull = true}; SScalarParam dest = {.columnData = &idata}; - int32_t code = scalarCalculate(pExpr[k].pExpr->_optrRoot.pRootNode, pBlockList, &dest); + int32_t code = scalarCalculate(pExpr[k].pExpr->_optrRoot.pRootNode, pBlockList, &dest); if (code != TSDB_CODE_SUCCESS) { taosArrayDestroy(pBlockList); return code; @@ -853,7 +828,7 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc // _rowts/_c0, not tbname column if (fmIsPseudoColumnFunc(pfCtx->functionId) && (!fmIsScanPseudoColumnFunc(pfCtx->functionId))) { // do nothing - } else if (fmIsNonstandardSQLFunc(pfCtx->functionId)) { + } else if (fmIsIndefiniteRowsFunc(pfCtx->functionId)) { SResultRowEntryInfo* pResInfo = GET_RES_INFO(&pCtx[k]); pfCtx->fpSet.init(&pCtx[k], pResInfo); @@ -934,7 +909,7 @@ int32_t setGroupResultOutputBuf(SOptrBasicInfo* binfo, int32_t numOfCols, char* return TSDB_CODE_SUCCESS; } -static bool functionNeedToExecute(SqlFunctionCtx* pCtx) { +bool functionNeedToExecute(SqlFunctionCtx* pCtx) { struct SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); // in case of timestamp column, always generated results. @@ -951,14 +926,14 @@ static bool functionNeedToExecute(SqlFunctionCtx* pCtx) { return false; } -// if (functionId == FUNCTION_FIRST_DST || functionId == FUNCTION_FIRST) { -// // return QUERY_IS_ASC_QUERY(pQueryAttr); -// } -// -// // denote the order type -// if ((functionId == FUNCTION_LAST_DST || functionId == FUNCTION_LAST)) { -// // return pCtx->param[0].i == pQueryAttr->order.order; -// } + // if (functionId == FUNCTION_FIRST_DST || functionId == FUNCTION_FIRST) { + // // return QUERY_IS_ASC_QUERY(pQueryAttr); + // } + // + // // denote the order type + // if ((functionId == FUNCTION_LAST_DST || functionId == FUNCTION_LAST)) { + // // return pCtx->param[0].i == pQueryAttr->order.order; + // } // in the reverse table scan, only the following functions need to be executed // if (IS_REVERSE_SCAN(pRuntimeEnv) || @@ -1073,19 +1048,19 @@ static int32_t setSelectValueColumnInfo(SqlFunctionCtx* pCtx, int32_t numOfOutpu for (int32_t i = 0; i < numOfOutput; ++i) { if (strcmp(pCtx[i].pExpr->pExpr->_function.functionName, "_select_value") == 0) { pValCtx[num++] = &pCtx[i]; - } else if (fmIsAggFunc(pCtx[i].functionId)) { + } else if (fmIsSelectFunc(pCtx[i].functionId)) { p = &pCtx[i]; } -// if (functionId == FUNCTION_TAG_DUMMY || functionId == FUNCTION_TS_DUMMY) { -// tagLen += pCtx[i].resDataInfo.bytes; -// pTagCtx[num++] = &pCtx[i]; -// } else if (functionId == FUNCTION_TS || functionId == FUNCTION_TAG) { -// // tag function may be the group by tag column -// // ts may be the required primary timestamp column -// continue; -// } else { -// // the column may be the normal column, group by normal_column, the functionId is FUNCTION_PRJ -// } + // if (functionId == FUNCTION_TAG_DUMMY || functionId == FUNCTION_TS_DUMMY) { + // tagLen += pCtx[i].resDataInfo.bytes; + // pTagCtx[num++] = &pCtx[i]; + // } else if (functionId == FUNCTION_TS || functionId == FUNCTION_TAG) { + // // tag function may be the group by tag column + // // ts may be the required primary timestamp column + // continue; + // } else { + // // the column may be the normal column, 
group by normal_column, the functionId is FUNCTION_PRJ + // } } if (p != NULL) { @@ -1124,7 +1099,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, SFuncExecEnv env = {0}; pCtx->functionId = pExpr->pExpr->_function.pFunctNode->funcId; - if (fmIsAggFunc(pCtx->functionId) || fmIsNonstandardSQLFunc(pCtx->functionId)) { + if (fmIsAggFunc(pCtx->functionId) || fmIsIndefiniteRowsFunc(pCtx->functionId)) { bool isUdaf = fmIsUserDefinedFunc(pCtx->functionId); if (!isUdaf) { fmGetFuncExecFuncs(pCtx->functionId, &pCtx->fpSet); @@ -1215,7 +1190,6 @@ static void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) { taosVariantDestroy(&pCtx[i].param[j].param); } - taosVariantDestroy(&pCtx[i].tag); taosMemoryFreeClear(pCtx[i].subsidiaries.pCtx); taosMemoryFree(pCtx[i].input.pData); taosMemoryFree(pCtx[i].input.pColumnDataAgg); @@ -1245,9 +1219,9 @@ void setTaskKilled(SExecTaskInfo* pTaskInfo) { pTaskInfo->code = TSDB_CODE_TSC_Q static bool isCachedLastQuery(STaskAttr* pQueryAttr) { for (int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) { int32_t functionId = getExprFunctionId(&pQueryAttr->pExpr1[i]); - if (functionId == FUNCTION_LAST || functionId == FUNCTION_LAST_DST) { - continue; - } +// if (functionId == FUNCTION_LAST || functionId == FUNCTION_LAST_DST) { +// continue; +// } return false; } @@ -1297,7 +1271,7 @@ static int32_t updateBlockLoadStatus(STaskAttr* pQuery, int32_t status) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { int32_t functionId = getExprFunctionId(&pQuery->pExpr1[i]); - +#if 0 if (functionId == FUNCTION_TS || functionId == FUNCTION_TS_DUMMY || functionId == FUNCTION_TAG || functionId == FUNCTION_TAG_DUMMY) { continue; @@ -1308,6 +1282,8 @@ static int32_t updateBlockLoadStatus(STaskAttr* pQuery, int32_t status) { } else { hasOtherFunc = true; } +#endif + } if (hasFirstLastFunc && status == BLK_DATA_NOT_LOAD) { @@ -1745,8 +1721,7 @@ void setFunctionResultOutput(SOptrBasicInfo* pInfo, SAggSupporter* pSup, int32_t SResultRow* pRow = doSetResultOutBufByKey(pSup->pResultBuf, pResultRowInfo, (char*)&tid, sizeof(tid), true, groupId, pTaskInfo, false, pSup); - ASSERT(pDataBlock->info.numOfCols == numOfExprs); - for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) { + for (int32_t i = 0; i < numOfExprs; ++i) { struct SResultRowEntryInfo* pEntry = getResultCell(pRow, i, rowCellInfoOffset); cleanupResultRowEntry(pEntry); @@ -1754,7 +1729,7 @@ void setFunctionResultOutput(SOptrBasicInfo* pInfo, SAggSupporter* pSup, int32_t pCtx[i].scanFlag = stage; } - initCtxOutputBuffer(pCtx, pDataBlock->info.numOfCols); + initCtxOutputBuffer(pCtx, numOfExprs); } void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t* bufCapacity, int32_t numOfInputRows) { @@ -1784,41 +1759,13 @@ void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t* bufCapacity, int32_t numOf // set the correct pointer after the memory buffer reallocated. 
int32_t functionId = pBInfo->pCtx[i].functionId; - +#if 0 if (functionId == FUNCTION_TOP || functionId == FUNCTION_BOTTOM || functionId == FUNCTION_DIFF || functionId == FUNCTION_DERIVATIVE) { // if (i > 0) pBInfo->pCtx[i].pTsOutput = pBInfo->pCtx[i - 1].pOutput; } - } -} +#endif -void copyTsColoum(SSDataBlock* pRes, SqlFunctionCtx* pCtx, int32_t numOfOutput) { - bool needCopyTs = false; - int32_t tsNum = 0; - char* src = NULL; - for (int32_t i = 0; i < numOfOutput; i++) { - int32_t functionId = pCtx[i].functionId; - if (functionId == FUNCTION_DIFF || functionId == FUNCTION_DERIVATIVE) { - needCopyTs = true; - if (i > 0 && pCtx[i - 1].functionId == FUNCTION_TS_DUMMY) { - SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, i - 1); // find ts data - src = pColRes->pData; - } - } else if (functionId == FUNCTION_TS_DUMMY) { - tsNum++; - } - } - - if (!needCopyTs) return; - if (tsNum < 2) return; - if (src == NULL) return; - - for (int32_t i = 0; i < numOfOutput; i++) { - int32_t functionId = pCtx[i].functionId; - if (functionId == FUNCTION_TS_DUMMY) { - SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, i); - memcpy(pColRes->pData, src, pColRes->info.bytes * pRes->info.rows); - } } } @@ -1844,12 +1791,6 @@ void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status) { } } -STableQueryInfo* createTableQueryInfo(void* buf, STimeWindow win) { - STableQueryInfo* pTableQueryInfo = buf; - pTableQueryInfo->lastKey = win.skey; - return pTableQueryInfo; -} - void destroyTableQueryInfoImpl(STableQueryInfo* pTableQueryInfo) { if (pTableQueryInfo == NULL) { return; @@ -1883,7 +1824,7 @@ void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numO } static void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const int8_t* rowRes, bool keep); -void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock, SArray* pColMatchInfo) { +void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock, SArray* pColMatchInfo) { if (pFilterNode == NULL) { return; } @@ -2006,8 +1947,9 @@ static void doUpdateNumOfRows(SResultRow* pRow, int32_t numOfExprs, const int32_ } } -int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo, - const int32_t* rowCellOffset, SqlFunctionCtx* pCtx, int32_t numOfExprs) { +int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf, + SGroupResInfo* pGroupResInfo, const int32_t* rowCellOffset, SqlFunctionCtx* pCtx, + int32_t numOfExprs) { int32_t numOfRows = getNumOfTotalRes(pGroupResInfo); int32_t start = pGroupResInfo->index; @@ -2056,11 +1998,11 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprI } else { // expand the result into multiple rows. E.g., _wstartts, top(k, 20) // the _wstartts needs to copy to 20 following rows, since the results of top-k expands to 20 different rows. 
- SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, slotId); - char* in = GET_ROWCELL_INTERBUF(pCtx[j].resultInfo); - for(int32_t k = 0; k < pRow->numOfRows; ++k) { - colDataAppend(pColInfoData, pBlock->info.rows + k, in, pCtx[j].resultInfo->isNullRes); - } + SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, slotId); + char* in = GET_ROWCELL_INTERBUF(pCtx[j].resultInfo); + for (int32_t k = 0; k < pRow->numOfRows; ++k) { + colDataAppend(pColInfoData, pBlock->info.rows + k, in, pCtx[j].resultInfo->isNullRes); + } } } @@ -2071,14 +2013,16 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprI } } - qDebug("%s result generated, rows:%d, groupId:%"PRIu64, GET_TASKID(pTaskInfo), pBlock->info.rows, pBlock->info.groupId); + qDebug("%s result generated, rows:%d, groupId:%" PRIu64, GET_TASKID(pTaskInfo), pBlock->info.rows, + pBlock->info.groupId); blockDataUpdateTsWindow(pBlock, 0); return 0; } -void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo, SDiskbasedBuf* pBuf) { - SExprInfo* pExprInfo = pOperator->pExpr; - int32_t numOfExprs = pOperator->numOfExprs; +void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo, + SDiskbasedBuf* pBuf) { + SExprInfo* pExprInfo = pOperator->pExpr; + int32_t numOfExprs = pOperator->numOfExprs; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; int32_t* rowCellOffset = pbInfo->rowCellInfoOffset; @@ -2136,102 +2080,6 @@ int32_t doFillTimeIntervalGapsInResults(struct SFillInfo* pFillInfo, SSDataBlock return pBlock->info.rows; } -void publishOperatorProfEvent(SOperatorInfo* pOperator, EQueryProfEventType eventType) { - SQueryProfEvent event = {0}; - - event.eventType = eventType; - event.eventTime = taosGetTimestampUs(); - event.operatorType = pOperator->operatorType; - // if (pQInfo->summary.queryProfEvents) { - // taosArrayPush(pQInfo->summary.queryProfEvents, &event); - // } -} - -void publishQueryAbortEvent(SExecTaskInfo* pTaskInfo, int32_t code) { - SQueryProfEvent event; - event.eventType = QUERY_PROF_QUERY_ABORT; - event.eventTime = taosGetTimestampUs(); - event.abortCode = code; - - if (pTaskInfo->cost.queryProfEvents) { - taosArrayPush(pTaskInfo->cost.queryProfEvents, &event); - } -} - -typedef struct { - uint8_t operatorType; - int64_t beginTime; - int64_t endTime; - int64_t selfTime; - int64_t descendantsTime; -} SOperatorStackItem; - -static void doOperatorExecProfOnce(SOperatorStackItem* item, SQueryProfEvent* event, SArray* opStack, - SHashObj* profResults) { - item->endTime = event->eventTime; - item->selfTime = (item->endTime - item->beginTime) - (item->descendantsTime); - - for (int32_t j = 0; j < taosArrayGetSize(opStack); ++j) { - SOperatorStackItem* ancestor = taosArrayGet(opStack, j); - ancestor->descendantsTime += item->selfTime; - } - - uint8_t operatorType = item->operatorType; - SOperatorProfResult* result = taosHashGet(profResults, &operatorType, sizeof(operatorType)); - if (result != NULL) { - result->sumRunTimes++; - result->sumSelfTime += item->selfTime; - } else { - SOperatorProfResult opResult; - opResult.operatorType = operatorType; - opResult.sumSelfTime = item->selfTime; - opResult.sumRunTimes = 1; - taosHashPut(profResults, &(operatorType), sizeof(operatorType), &opResult, sizeof(opResult)); - } -} - -void calculateOperatorProfResults(void) { - // if (pQInfo->summary.queryProfEvents == NULL) { - // // qDebug("QInfo:0x%"PRIx64" query prof events array is null", pQInfo->qId); - 
// return; - // } - // - // if (pQInfo->summary.operatorProfResults == NULL) { - // // qDebug("QInfo:0x%"PRIx64" operator prof results hash is null", pQInfo->qId); - // return; - // } - - SArray* opStack = taosArrayInit(32, sizeof(SOperatorStackItem)); - if (opStack == NULL) { - return; - } -#if 0 - size_t size = taosArrayGetSize(pQInfo->summary.queryProfEvents); - SHashObj* profResults = pQInfo->summary.operatorProfResults; - - for (int i = 0; i < size; ++i) { - SQueryProfEvent* event = taosArrayGet(pQInfo->summary.queryProfEvents, i); - if (event->eventType == QUERY_PROF_BEFORE_OPERATOR_EXEC) { - SOperatorStackItem opItem; - opItem.operatorType = event->operatorType; - opItem.beginTime = event->eventTime; - opItem.descendantsTime = 0; - taosArrayPush(opStack, &opItem); - } else if (event->eventType == QUERY_PROF_AFTER_OPERATOR_EXEC) { - SOperatorStackItem* item = taosArrayPop(opStack); - assert(item->operatorType == event->operatorType); - doOperatorExecProfOnce(item, event, opStack, profResults); - } else if (event->eventType == QUERY_PROF_QUERY_ABORT) { - SOperatorStackItem* item; - while ((item = taosArrayPop(opStack)) != NULL) { - doOperatorExecProfOnce(item, event, opStack, profResults); - } - } - } -#endif - taosArrayDestroy(opStack); -} - void queryCostStatis(SExecTaskInfo* pTaskInfo) { STaskCostInfo* pSummary = &pTaskInfo->cost; @@ -2264,15 +2112,6 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo) { // qDebug("QInfo:0x%"PRIx64" :cost summary: winResPool size:%.2f Kb, numOfWin:%"PRId64", tableInfoSize:%.2f Kb, // hashTable:%.2f Kb", pQInfo->qId, pSummary->winInfoSize/1024.0, // pSummary->numOfTimeWindows, pSummary->tableInfoSize/1024.0, pSummary->hashSize/1024.0); - - if (pSummary->operatorProfResults) { - SOperatorProfResult* opRes = taosHashIterate(pSummary->operatorProfResults, NULL); - while (opRes != NULL) { - // qDebug("QInfo:0x%" PRIx64 " :cost summary: operator : %d, exec times: %" PRId64 ", self time: %" PRId64, - // pQInfo->qId, opRes->operatorType, opRes->sumRunTimes, opRes->sumSelfTime); - opRes = taosHashIterate(pSummary->operatorProfResults, opRes); - } - } } // static void updateOffsetVal(STaskRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pBlockInfo) { @@ -2531,7 +2370,7 @@ int32_t appendDownstream(SOperatorInfo* p, SOperatorInfo** pDownstream, int32_t return TSDB_CODE_SUCCESS; } -static void doDestroyTableQueryInfo(STableGroupInfo* pTableqinfoGroupInfo); +static void doDestroyTableList(STableListInfo* pTableqinfoList); static void doTableQueryInfoTimeWindowCheck(SExecTaskInfo* pTaskInfo, STableQueryInfo* pTableQueryInfo, int32_t order) { #if 0 @@ -2683,46 +2522,7 @@ int32_t setSDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadI int32_t compLen, int32_t numOfOutput, int64_t startTs, uint64_t* total, SArray* pColList) { if (pColList == NULL) { // data from other sources - blockDataEnsureCapacity(pRes, numOfRows); - - int32_t dataLen = *(int32_t*)pData; - pData += sizeof(int32_t); - - pRes->info.groupId = *(uint64_t*)pData; - pData += sizeof(uint64_t); - - int32_t* colLen = (int32_t*)pData; - - char* pStart = pData + sizeof(int32_t) * numOfOutput; - for (int32_t i = 0; i < numOfOutput; ++i) { - colLen[i] = htonl(colLen[i]); - ASSERT(colLen[i] >= 0); - - SColumnInfoData* pColInfoData = taosArrayGet(pRes->pDataBlock, i); - if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) { - pColInfoData->varmeta.length = colLen[i]; - pColInfoData->varmeta.allocLen = colLen[i]; - - memcpy(pColInfoData->varmeta.offset, pStart, sizeof(int32_t) * numOfRows); - pStart += 
sizeof(int32_t) * numOfRows; - - if (colLen[i] > 0) { - pColInfoData->pData = taosMemoryMalloc(colLen[i]); - } - } else { - memcpy(pColInfoData->nullbitmap, pStart, BitmapLen(numOfRows)); - pStart += BitmapLen(numOfRows); - } - - if (colLen[i] > 0) { - memcpy(pColInfoData->pData, pStart, colLen[i]); - } - - // TODO setting this flag to true temporarily so aggregate function on stable will - // examine NULL value for non-primary key column - pColInfoData->hasNull = true; - pStart += colLen[i]; - } + blockCompressDecode(pRes, numOfOutput, numOfRows, pData); } else { // extract data according to pColList ASSERT(numOfOutput == taosArrayGetSize(pColList)); char* pStart = pData; @@ -2747,10 +2547,10 @@ int32_t setSDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadI for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData idata = {0}; - idata.info.type = pSchema[i].type; + idata.info.type = pSchema[i].type; idata.info.bytes = pSchema[i].bytes; idata.info.colId = pSchema[i].colId; - idata.hasNull = true; + idata.hasNull = true; taosArrayPush(pBlock->pDataBlock, &idata); if (IS_VAR_DATA_TYPE(idata.info.type)) { @@ -2820,6 +2620,7 @@ static void* setAllSourcesCompleted(SOperatorInfo* pOperator, int64_t startTs) { int64_t el = taosGetTimestampUs() - startTs; SLoadRemoteDataInfo* pLoadInfo = &pExchangeInfo->loadInfo; + pLoadInfo->totalElapsed += el; size_t totalSources = taosArrayGetSize(pExchangeInfo->pSources); @@ -2863,6 +2664,7 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx pExchangeInfo->loadInfo.totalRows); pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED; completed += 1; + taosMemoryFreeClear(pDataInfo->pRsp); continue; } @@ -2870,6 +2672,7 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx code = setSDataBlockFromFetchRsp(pExchangeInfo->pResult, pLoadInfo, pTableRsp->numOfRows, pTableRsp->data, pTableRsp->compLen, pTableRsp->numOfCols, startTs, &pDataInfo->totalRows, NULL); if (code != 0) { + taosMemoryFreeClear(pDataInfo->pRsp); goto _error; } @@ -2890,10 +2693,12 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx pDataInfo->status = EX_SOURCE_DATA_NOT_READY; code = doSendFetchDataRequest(pExchangeInfo, pTaskInfo, i); if (code != TSDB_CODE_SUCCESS) { + taosMemoryFreeClear(pDataInfo->pRsp); goto _error; } } + taosMemoryFreeClear(pDataInfo->pRsp); return pExchangeInfo->pResult; } @@ -2996,6 +2801,7 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) { pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED; pExchangeInfo->current += 1; + taosMemoryFreeClear(pDataInfo->pRsp); continue; } @@ -3020,6 +2826,8 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) { pLoadInfo->totalSize); } + pOperator->resultInfo.totalRows += pRes->info.rows; + taosMemoryFreeClear(pDataInfo->pRsp); return pExchangeInfo->pResult; } } @@ -3029,10 +2837,10 @@ static int32_t prepareLoadRemoteData(SOperatorInfo* pOperator) { return TSDB_CODE_SUCCESS; } + int64_t st = taosGetTimestampUs(); + SExchangeInfo* pExchangeInfo = pOperator->info; - if (pExchangeInfo->seqLoadData) { - // do nothing for sequentially load data - } else { + if (!pExchangeInfo->seqLoadData) { int32_t code = prepareConcurrentlyLoad(pOperator); if (code != TSDB_CODE_SUCCESS) { return code; @@ -3040,6 +2848,7 @@ static int32_t prepareLoadRemoteData(SOperatorInfo* pOperator) { } OPTR_SET_OPENED(pOperator); + pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0; return TSDB_CODE_SUCCESS; } @@ -3067,15 +2876,6 @@ 
static SSDataBlock* doLoadRemoteData(SOperatorInfo* pOperator) { } else { return concurrentlyLoadRemoteData(pOperator); } - -#if 0 - _error: - taosMemoryFreeClear(pMsg); - taosMemoryFreeClear(pMsgSendInfo); - - terrno = pTaskInfo->code; - return NULL; -#endif } static int32_t initDataSource(int32_t numOfSources, SExchangeInfo* pInfo) { @@ -3100,16 +2900,12 @@ static int32_t initDataSource(int32_t numOfSources, SExchangeInfo* pInfo) { return TSDB_CODE_SUCCESS; } -SOperatorInfo* createExchangeOperatorInfo(void *pTransporter, const SNodeList* pSources, SSDataBlock* pBlock, +SOperatorInfo* createExchangeOperatorInfo(void* pTransporter, const SNodeList* pSources, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo) { SExchangeInfo* pInfo = taosMemoryCalloc(1, sizeof(SExchangeInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); - if (pInfo == NULL || pOperator == NULL) { - taosMemoryFreeClear(pInfo); - taosMemoryFreeClear(pOperator); - terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; - return NULL; + goto _error; } size_t numOfSources = LIST_LENGTH(pSources); @@ -3145,7 +2941,6 @@ SOperatorInfo* createExchangeOperatorInfo(void *pTransporter, const SNodeList* p pOperator->fpSet = createOperatorFpSet(prepareLoadRemoteData, doLoadRemoteData, NULL, NULL, destroyExchangeOperatorInfo, NULL, NULL, NULL); pInfo->pTransporter = pTransporter; - return pOperator; _error: @@ -3213,7 +3008,7 @@ static bool needToMerge(SSDataBlock* pBlock, SArray* groupInfo, char** buf, int3 static void doMergeResultImpl(SSortedMergeOperatorInfo* pInfo, SqlFunctionCtx* pCtx, int32_t numOfExpr, int32_t rowIndex) { for (int32_t j = 0; j < numOfExpr; ++j) { // TODO set row index -// pCtx[j].startRow = rowIndex; + // pCtx[j].startRow = rowIndex; } for (int32_t j = 0; j < numOfExpr; ++j) { @@ -3264,7 +3059,7 @@ static void doMergeImpl(SOperatorInfo* pOperator, int32_t numOfExpr, SSDataBlock SqlFunctionCtx* pCtx = pInfo->binfo.pCtx; for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) { -// pCtx[i].size = 1; + // pCtx[i].size = 1; } for (int32_t i = 0; i < pBlock->info.rows; ++i) { @@ -3490,10 +3285,11 @@ _error: return NULL; } -int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t *order, int32_t* scanFlag) { +int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag) { // todo add more information about exchange operation int32_t type = pOperator->operatorType; - if (type == QUERY_NODE_PHYSICAL_PLAN_EXCHANGE || type == QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { + if (type == QUERY_NODE_PHYSICAL_PLAN_EXCHANGE || type == QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN || + type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN) { *order = TSDB_ORDER_ASC; *scanFlag = MAIN_SCAN; return TSDB_CODE_SUCCESS; @@ -3521,16 +3317,15 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) { SAggOperatorInfo* pAggInfo = pOperator->info; SOptrBasicInfo* pInfo = &pAggInfo->binfo; - SOperatorInfo* downstream = pOperator->pDownstream[0]; + SOperatorInfo* downstream = pOperator->pDownstream[0]; + + int64_t st = taosGetTimestampUs(); int32_t order = TSDB_ORDER_ASC; int32_t scanFlag = MAIN_SCAN; while (1) { - publishOperatorProfEvent(downstream, QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); - publishOperatorProfEvent(downstream, QUERY_PROF_AFTER_OPERATOR_EXEC); - if (pBlock == NULL) { break; } @@ -3558,14 +3353,14 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) { } 
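Review note on the aggEncodeResultRow/aggDecodeResultRow hunks below: the pair no longer longjmp()s out of the operator on failure but returns an error code, and the encoded buffer gains a leading total-length word so the decoder can validate what it consumes. The added paths return TDB_CODE_SUCCESS where TSDB_CODE_SUCCESS is presumably intended; both appear to evaluate to zero, so behavior should be unchanged, but the spelling looks accidental. The sketch below is a standalone rendering of the buffer layout as these hunks define it; the names are illustrative stand-ins, not TDengine APIs.

#include <stdint.h>
#include <string.h>

/*
 * Layout written by aggEncodeResultRow and consumed by aggDecodeResultRow:
 *
 *   [int32 totalLen][int32 rowCount]
 *   then per row: [int32 keyLen][key bytes][int32 valueLen][value bytes]
 */
static int32_t demoDecodeResultRows(const char* buf) {
  int32_t totalLen = 0, count = 0, offset = 0;
  memcpy(&totalLen, buf + offset, sizeof(int32_t));
  offset += sizeof(int32_t);
  memcpy(&count, buf + offset, sizeof(int32_t));
  offset += sizeof(int32_t);

  while (count-- > 0 && totalLen > offset) {
    int32_t keyLen = 0;
    memcpy(&keyLen, buf + offset, sizeof(int32_t));
    offset += sizeof(int32_t) + keyLen;  // skip the key (group id plus row position)

    int32_t valueLen = 0;
    memcpy(&valueLen, buf + offset, sizeof(int32_t));
    offset += sizeof(int32_t) + valueLen;  // skip the serialized result row
  }

  return (offset == totalLen) ? 0 : -1;  // mirrors the decoder's final offset/length check
}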
#if 0 // test for encode/decode result info - if(pOperator->encodeResultRow){ + if(pOperator->fpSet.encodeResultRow){ char *result = NULL; int32_t length = 0; - SAggSupporter *pSup = &pAggInfo->aggSup; - pOperator->encodeResultRow(pOperator, pSup, pInfo, &result, &length); + pOperator->fpSet.encodeResultRow(pOperator, &result, &length); + SAggSupporter* pSup = &pAggInfo->aggSup; taosHashClear(pSup->pResultRowHashTable); pInfo->resultRowInfo.size = 0; - pOperator->decodeResultRow(pOperator, pSup, pInfo, result, length); + pOperator->fpSet.decodeResultRow(pOperator, result); if(result){ taosMemoryFree(result); } @@ -3576,6 +3371,8 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) { closeAllResultRows(&pAggInfo->binfo.resultRowInfo); initGroupedResultInfo(&pAggInfo->groupResInfo, pAggInfo->aggSup.pResultRowHashTable, 0); OPTR_SET_OPENED(pOperator); + + pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0; return TSDB_CODE_SUCCESS; } @@ -3590,6 +3387,7 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; pTaskInfo->code = pOperator->fpSet._openFn(pOperator); if (pTaskInfo->code != TSDB_CODE_SUCCESS) { + doSetOperatorCompleted(pOperator); return NULL; } @@ -3599,20 +3397,31 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) { doSetOperatorCompleted(pOperator); } - return (blockDataGetNumOfRows(pInfo->pRes) != 0) ? pInfo->pRes : NULL; + size_t rows = blockDataGetNumOfRows(pInfo->pRes); // pInfo->pRes : NULL; + pOperator->resultInfo.totalRows += rows; + + return (rows == 0) ? NULL : pInfo->pRes; } -void aggEncodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char** result, - int32_t* length) { - int32_t size = taosHashGetSize(pSup->pResultRowHashTable); - size_t keyLen = sizeof(uint64_t) * 2; // estimate the key length - int32_t totalSize = sizeof(int32_t) + size * (sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize); - *result = taosMemoryCalloc(1, totalSize); +int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* length) { + if (result == NULL || length == NULL) { + return TSDB_CODE_TSC_INVALID_INPUT; + } + SOptrBasicInfo* pInfo = (SOptrBasicInfo*)(pOperator->info); + SAggSupporter* pSup = (SAggSupporter*)POINTER_SHIFT(pOperator->info, sizeof(SOptrBasicInfo)); + int32_t size = taosHashGetSize(pSup->pResultRowHashTable); + size_t keyLen = sizeof(uint64_t) * 2; // estimate the key length + int32_t totalSize = + sizeof(int32_t) + sizeof(int32_t) + size * (sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize); + + *result = (char*)taosMemoryCalloc(1, totalSize); if (*result == NULL) { - longjmp(pOperator->pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY); + return TSDB_CODE_OUT_OF_MEMORY; } - *(int32_t*)(*result) = size; + int32_t offset = sizeof(int32_t); + *(int32_t*)(*result + offset) = size; + offset += sizeof(int32_t); // prepare memory SResultRowPosition* pos = &pInfo->resultRowInfo.cur; @@ -3634,12 +3443,11 @@ void aggEncodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi // recalculate the result size int32_t realTotalSize = offset + sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize; if (realTotalSize > totalSize) { - char* tmp = taosMemoryRealloc(*result, realTotalSize); + char* tmp = (char*)taosMemoryRealloc(*result, realTotalSize); if (tmp == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; taosMemoryFree(*result); *result = NULL; - longjmp(pOperator->pTaskInfo->env, 
TSDB_CODE_OUT_OF_MEMORY); + return TSDB_CODE_OUT_OF_MEMORY; } else { *result = tmp; } @@ -3659,30 +3467,34 @@ void aggEncodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi pIter = taosHashIterate(pSup->pResultRowHashTable, pIter); } - if (length) { - *length = offset; - } - return; + *(int32_t*)(*result) = offset; + *length = offset; + + return TDB_CODE_SUCCESS; } -bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char* result, - int32_t length) { - if (!result || length <= 0) { - return false; +int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result) { + if (result == NULL) { + return TSDB_CODE_TSC_INVALID_INPUT; } + SOptrBasicInfo* pInfo = (SOptrBasicInfo*)(pOperator->info); + SAggSupporter* pSup = (SAggSupporter*)POINTER_SHIFT(pOperator->info, sizeof(SOptrBasicInfo)); // int32_t size = taosHashGetSize(pSup->pResultRowHashTable); - int32_t count = *(int32_t*)(result); - + int32_t length = *(int32_t*)(result); int32_t offset = sizeof(int32_t); + + int32_t count = *(int32_t*)(result + offset); + offset += sizeof(int32_t); + while (count-- > 0 && length > offset) { int32_t keyLen = *(int32_t*)(result + offset); offset += sizeof(int32_t); uint64_t tableGroupId = *(uint64_t*)(result + offset); - SResultRow* resultRow = getNewResultRow_rv(pSup->pResultBuf, tableGroupId, pSup->resultRowSize); + SResultRow* resultRow = getNewResultRow(pSup->pResultBuf, tableGroupId, pSup->resultRowSize); if (!resultRow) { - longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_INVALID_INPUT); + return TSDB_CODE_TSC_INVALID_INPUT; } // add a new result set for a new group @@ -3692,7 +3504,7 @@ bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi offset += keyLen; int32_t valueLen = *(int32_t*)(result + offset); if (valueLen != pSup->resultRowSize) { - longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_INVALID_INPUT); + return TSDB_CODE_TSC_INVALID_INPUT; } offset += sizeof(int32_t); int32_t pageId = resultRow->pageId; @@ -3703,17 +3515,13 @@ bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi offset += valueLen; initResultRow(resultRow); - prepareResultListBuffer(&pInfo->resultRowInfo, pOperator->pTaskInfo->env); - // pInfo->resultRowInfo.cur = pInfo->resultRowInfo.size; - // pInfo->resultRowInfo.pPosition[pInfo->resultRowInfo.size++] = - // (SResultRowPosition){.pageId = resultRow->pageId, .offset = resultRow->offset}; pInfo->resultRowInfo.cur = (SResultRowPosition){.pageId = resultRow->pageId, .offset = resultRow->offset}; } if (offset != length) { - longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_INVALID_INPUT); + return TSDB_CODE_TSC_INVALID_INPUT; } - return true; + return TDB_CODE_SUCCESS; } enum { @@ -3825,22 +3633,25 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) { } #endif + int64_t st = 0; int32_t order = 0; int32_t scanFlag = 0; + if (pOperator->cost.openCost == 0) { + st = taosGetTimestampUs(); + } + SOperatorInfo* downstream = pOperator->pDownstream[0]; while (1) { // The downstream exec may change the value of the newgroup, so use a local variable instead. - publishOperatorProfEvent(downstream, QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); - publishOperatorProfEvent(downstream, QUERY_PROF_AFTER_OPERATOR_EXEC); - if (pBlock == NULL) { - setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED); + doSetOperatorCompleted(pOperator); break; } +#if 0 // Return result of the previous group in the firstly. 
if (false) { if (pRes->info.rows > 0) { @@ -3850,6 +3661,7 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) { initCtxOutputBuffer(pInfo->pCtx, pOperator->numOfExprs); } } +#endif // the pDataBlock are always the same one, no need to call this again int32_t code = getTableScanInfo(pOperator->pDownstream[0], &order, &scanFlag); @@ -3860,7 +3672,8 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) { setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order, scanFlag, false); blockDataEnsureCapacity(pInfo->pRes, pInfo->pRes->info.rows + pBlock->info.rows); - code = projectApplyFunctions(pOperator->pExpr, pInfo->pRes, pBlock, pInfo->pCtx, pOperator->numOfExprs, pProjectInfo->pPseudoColInfo); + code = projectApplyFunctions(pOperator->pExpr, pInfo->pRes, pBlock, pInfo->pCtx, pOperator->numOfExprs, + pProjectInfo->pPseudoColInfo); if (code != TSDB_CODE_SUCCESS) { longjmp(pTaskInfo->env, code); } @@ -3875,8 +3688,14 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) { pProjectInfo->curOutput += pInfo->pRes->info.rows; - // copyTsColoum(pRes, pInfo->pCtx, pOperator->numOfExprs); - return (pInfo->pRes->info.rows > 0) ? pInfo->pRes : NULL; + size_t rows = pInfo->pRes->info.rows; + pOperator->resultInfo.totalRows += rows; + + if (pOperator->cost.openCost == 0) { + pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0; + } + + return (rows > 0) ? pInfo->pRes : NULL; } static void doHandleRemainBlockForNewGroupImpl(SFillOperatorInfo* pInfo, SResultInfo* pResultInfo, bool* newgroup, @@ -3933,10 +3752,7 @@ static SSDataBlock* doFill(SOperatorInfo* pOperator) { SOperatorInfo* pDownstream = pOperator->pDownstream[0]; while (1) { - publishOperatorProfEvent(pDownstream, QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = pDownstream->fpSet.getNextFn(pDownstream); - publishOperatorProfEvent(pDownstream, QUERY_PROF_AFTER_OPERATOR_EXEC); - if (*newgroup) { assert(pBlock != NULL); } @@ -3990,18 +3806,6 @@ static SSDataBlock* doFill(SOperatorInfo* pOperator) { } } -// todo set the attribute of query scan count -static int32_t getNumOfScanTimes(STaskAttr* pQueryAttr) { - for (int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) { - int32_t functionId = getExprFunctionId(&pQueryAttr->pExpr1[i]); - if (functionId == FUNCTION_STDDEV || functionId == FUNCTION_PERCT) { - return 2; - } - } - - return 1; -} - static void destroyOperatorInfo(SOperatorInfo* pOperator) { if (pOperator == NULL) { return; @@ -4036,6 +3840,21 @@ static void destroyOperatorInfo(SOperatorInfo* pOperator) { taosMemoryFreeClear(pOperator); } +int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz) { + *defaultPgsz = 4096; + while (*defaultPgsz < rowSize * 4) { + *defaultPgsz <<= 1u; + } + + // at least four pages need to be in buffer + *defaultBufsz = 4096 * 256; + if ((*defaultBufsz) <= (*defaultPgsz)) { + (*defaultBufsz) = (*defaultPgsz) * 4; + } + + return 0; +} + int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, size_t keyBufSize, const char* pKey) { _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); @@ -4048,18 +3867,11 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n return TSDB_CODE_OUT_OF_MEMORY; } - uint32_t defaultPgsz = 4096; - while (defaultPgsz < pAggSup->resultRowSize * 4) { - defaultPgsz <<= 1u; - } + uint32_t defaultPgsz = 0; + uint32_t defaultBufsz = 0; + getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz); - // at least four 
pages need to be in buffer - int32_t defaultBufsz = 4096 * 256; - if (defaultBufsz <= defaultPgsz) { - defaultBufsz = defaultPgsz * 4; - } - - int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, "/tmp/"); + int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, TD_TMP_DIR_PATH); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -4096,35 +3908,30 @@ void initResultSizeInfo(SOperatorInfo* pOperator, int32_t numOfRows) { } } -static STableQueryInfo* initTableQueryInfo(const STableGroupInfo* pTableGroupInfo) { - if (pTableGroupInfo->numOfTables == 0) { - return NULL; - } - - STableQueryInfo* pTableQueryInfo = taosMemoryCalloc(pTableGroupInfo->numOfTables, sizeof(STableQueryInfo)); - if (pTableQueryInfo == NULL) { - return NULL; - } - - int32_t index = 0; - for (int32_t i = 0; i < taosArrayGetSize(pTableGroupInfo->pGroupList); ++i) { - SArray* pa = taosArrayGetP(pTableGroupInfo->pGroupList, i); - for (int32_t j = 0; j < taosArrayGetSize(pa); ++j) { - STableKeyInfo* pk = taosArrayGet(pa, j); - STableQueryInfo* pTQueryInfo = &pTableQueryInfo[index++]; - pTQueryInfo->lastKey = pk->lastKey; - } - } - - STimeWindow win = {0, INT64_MAX}; - createTableQueryInfo(pTableQueryInfo, win); - return pTableQueryInfo; -} +// static STableQueryInfo* initTableQueryInfo(const STableListInfo* pTableListInfo) { +// int32_t size = taosArrayGetSize(pTableListInfo->pTableList); +// if (size == 0) { +// return NULL; +// } +// +// STableQueryInfo* pTableQueryInfo = taosMemoryCalloc(size, sizeof(STableQueryInfo)); +// if (pTableQueryInfo == NULL) { +// return NULL; +// } +// +// for (int32_t j = 0; j < size; ++j) { +// STableKeyInfo* pk = taosArrayGet(pTableListInfo->pTableList, j); +// STableQueryInfo* pTQueryInfo = &pTableQueryInfo[j]; +// pTQueryInfo->lastKey = pk->lastKey; +// } +// +// pTableQueryInfo->lastKey = 0; +// return pTableQueryInfo; +//} SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock, SExprInfo* pScalarExprInfo, - int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo, - const STableGroupInfo* pTableGroupInfo) { + int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo) { SAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SAggOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -4137,7 +3944,6 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* initResultSizeInfo(pOperator, numOfRows); int32_t code = initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, pResultBlock, keyBufSize, pTaskInfo->id.str); - pInfo->pTableQueryInfo = initTableQueryInfo(pTableGroupInfo); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -4246,9 +4052,9 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SExprInfo* p goto _error; } - pInfo->limit = *pLimit; - pInfo->slimit = *pSlimit; - pInfo->curOffset = pLimit->offset; + pInfo->limit = *pLimit; + pInfo->slimit = *pSlimit; + pInfo->curOffset = pLimit->offset; pInfo->curSOffset = pSlimit->offset; pInfo->binfo.pRes = pResBlock; @@ -4267,15 +4073,15 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SExprInfo* p initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, pResBlock, keyBufSize, pTaskInfo->id.str); setFunctionResultOutput(&pInfo->binfo, &pInfo->aggSup, MAIN_SCAN, numOfCols, pTaskInfo); - pInfo->pPseudoColInfo = setRowTsColumnOutputInfo(pInfo->binfo.pCtx, 
numOfCols); - pOperator->name = "ProjectOperator"; + pInfo->pPseudoColInfo = setRowTsColumnOutputInfo(pInfo->binfo.pCtx, numOfCols); + pOperator->name = "ProjectOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_PROJECT; - pOperator->blocking = false; - pOperator->status = OP_NOT_OPENED; - pOperator->info = pInfo; - pOperator->pExpr = pExprInfo; - pOperator->numOfExprs = num; - pOperator->pTaskInfo = pTaskInfo; + pOperator->blocking = false; + pOperator->status = OP_NOT_OPENED; + pOperator->info = pInfo; + pOperator->pExpr = pExprInfo; + pOperator->numOfExprs = num; + pOperator->pTaskInfo = pTaskInfo; pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doProjectOperation, NULL, NULL, destroyProjectOperatorInfo, NULL, NULL, NULL); @@ -4394,10 +4200,10 @@ static SColumn* createColumn(int32_t blockId, int32_t slotId, int32_t colId, SDa } pCol->slotId = slotId; - pCol->colId = colId; - pCol->bytes = pType->bytes; - pCol->type = pType->type; - pCol->scale = pType->scale; + pCol->colId = colId; + pCol->bytes = pType->bytes; + pCol->type = pType->type; + pCol->scale = pType->scale; pCol->precision = pType->precision; pCol->dataBlockId = blockId; @@ -4472,10 +4278,10 @@ SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* if (strcmp(pExp->pExpr->_function.functionName, "tbname") == 0) { pFuncNode->pParameterList = nodesMakeList(); ASSERT(LIST_LENGTH(pFuncNode->pParameterList) == 0); - SValueNode *res = (SValueNode *)nodesMakeNode(QUERY_NODE_VALUE); - if (NULL == res) { // todo handle error + SValueNode* res = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); + if (NULL == res) { // todo handle error } else { - res->node.resType = (SDataType) {.bytes = sizeof(int64_t), .type = TSDB_DATA_TYPE_BIGINT}; + res->node.resType = (SDataType){.bytes = sizeof(int64_t), .type = TSDB_DATA_TYPE_BIGINT}; nodesListAppend(pFuncNode->pParameterList, res); } } @@ -4535,17 +4341,18 @@ static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPT } static tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, - STableGroupInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId); + STableListInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId, + SNode* pTagCond); -static int32_t doCreateTableGroup(void* metaHandle, int32_t tableType, uint64_t tableUid, STableGroupInfo* pGroupInfo, - uint64_t queryId, uint64_t taskId); -static SArray* extractTableIdList(const STableGroupInfo* pTableGroupInfo); +static int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo, + SNode* pTagCond); +static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo); static SArray* extractColumnInfo(SNodeList* pNodeList); static SArray* createSortInfo(SNodeList* pNodeList); static SArray* extractPartitionColInfo(SNodeList* pNodeList); -void extractTableSchemaVersion(SReadHandle *pHandle, uint64_t uid, SExecTaskInfo* pTaskInfo) { +void extractTableSchemaVersion(SReadHandle* pHandle, uint64_t uid, SExecTaskInfo* pTaskInfo) { SMetaReader mr = {0}; metaReaderInit(&mr, pHandle->meta, 0); metaGetTableEntryByUid(&mr, uid); @@ -4553,29 +4360,30 @@ void extractTableSchemaVersion(SReadHandle *pHandle, uint64_t uid, SExecTaskInfo pTaskInfo->schemaVer.tablename = strdup(mr.me.name); if (mr.me.type == TSDB_SUPER_TABLE) { - pTaskInfo->schemaVer.sversion = mr.me.stbEntry.schema.sver; - pTaskInfo->schemaVer.tversion = mr.me.stbEntry.schemaTag.sver; + pTaskInfo->schemaVer.sversion = 
mr.me.stbEntry.schemaRow.version; + pTaskInfo->schemaVer.tversion = mr.me.stbEntry.schemaTag.version; } else if (mr.me.type == TSDB_CHILD_TABLE) { tb_uid_t suid = mr.me.ctbEntry.suid; metaGetTableEntryByUid(&mr, suid); - pTaskInfo->schemaVer.sversion = mr.me.stbEntry.schema.sver; - pTaskInfo->schemaVer.tversion = mr.me.stbEntry.schemaTag.sver; + pTaskInfo->schemaVer.sversion = mr.me.stbEntry.schemaRow.version; + pTaskInfo->schemaVer.tversion = mr.me.stbEntry.schemaTag.version; } else { - pTaskInfo->schemaVer.sversion = mr.me.ntbEntry.schema.sver; + pTaskInfo->schemaVer.sversion = mr.me.ntbEntry.schemaRow.version; } metaReaderClear(&mr); } SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHandle* pHandle, - uint64_t queryId, uint64_t taskId, STableGroupInfo* pTableGroupInfo) { + uint64_t queryId, uint64_t taskId, STableListInfo* pTableListInfo, SNode* pTagCond) { int32_t type = nodeType(pPhyNode); if (pPhyNode->pChildren == NULL || LIST_LENGTH(pPhyNode->pChildren) == 0) { if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == type) { STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode; - tsdbReaderT pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableGroupInfo, (uint64_t)queryId, taskId); + tsdbReaderT pDataReader = + doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond); if (pDataReader == NULL && terrno != 0) { return NULL; } @@ -4592,36 +4400,27 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo SSDataBlock* pResBlock = createResDataBlock(pExchange->node.pOutputDataBlockDesc); return createExchangeOperatorInfo(pHandle->pMsgCb->clientRpc, pExchange->pSrcEndPoints, pResBlock, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN == type) { - SScanPhysiNode* pScanPhyNode = (SScanPhysiNode*)pPhyNode; // simple child table. + SScanPhysiNode* pScanPhyNode = (SScanPhysiNode*)pPhyNode; // simple child table. 
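Review note on the window-aggregation supports initialized in the hunks below: the patch now seeds STimeWindowAggSupp.maxTs with INT64_MIN in the stream-scan and interval cases, so the first timestamp an operator observes always becomes the running maximum. The stream-session hunk further down initializes only waterMark and calTrigger and leaves maxTs zero-initialized; if that operator also tracks a maximum timestamp it presumably wants the same seed, which is worth double-checking. A standalone illustration of why the explicit seed matters (types here are stand-ins):

#include <stdint.h>

typedef struct {
  int64_t waterMark;
  int32_t calTrigger;
  int64_t maxTs;
} DemoTwSup;

/* With designated initializers, omitted fields are zeroed. A zero maxTs
 * silently discards all negative (pre-epoch) timestamps from max-tracking,
 * so the explicit INT64_MIN seed is what makes the first observation win. */
static int64_t demoTrackMax(DemoTwSup* sup, int64_t ts) {
  if (ts > sup->maxTs) {
    sup->maxTs = ts;
  }
  return sup->maxTs;
}

DemoTwSup seeded = {.waterMark = 0, .calTrigger = 0, .maxTs = INT64_MIN};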
STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode; - - int32_t numOfCols = 0; - + STimeWindowAggSupp twSup = { + .waterMark = pTableScanNode->watermark, .calTrigger = pTableScanNode->triggerType, .maxTs = INT64_MIN}; tsdbReaderT pDataReader = NULL; if (pHandle->vnode) { - pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableGroupInfo, (uint64_t)queryId, taskId); + pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond); } else { - doCreateTableGroup(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableGroupInfo, - queryId, taskId); + getTableList(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableListInfo, pTagCond); } if (pDataReader == NULL && terrno != 0) { - qDebug("pDataReader is NULL"); + qDebug("%s pDataReader is NULL", GET_TASKID(pTaskInfo)); // return NULL; } else { - qDebug("pDataReader is not NULL"); + qDebug("%s pDataReader is not NULL", GET_TASKID(pTaskInfo)); } + SArray* tableIdList = extractTableIdList(pTableListInfo); + SOperatorInfo* pOperator = createStreamScanOperatorInfo(pDataReader, pHandle, tableIdList, pTableScanNode, + pTaskInfo, &twSup, pTableScanNode->tsColId); - SDataBlockDescNode* pDescNode = pScanPhyNode->node.pOutputDataBlockDesc; - SOperatorInfo* pOperatorDumy = createTableScanOperatorInfo(pTableScanNode, pDataReader, pHandle, pTaskInfo); - - SArray* tableIdList = extractTableIdList(pTableGroupInfo); - - SSDataBlock* pResBlock = createResDataBlock(pDescNode); - SArray* pCols = extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID); - - SOperatorInfo* pOperator = createStreamScanOperatorInfo(pHandle->reader, pDataReader, pHandle, pScanPhyNode->uid, pResBlock, pCols, tableIdList, pTaskInfo, - pScanPhyNode->node.pConditions, pOperatorDumy); taosArrayDestroy(tableIdList); return pOperator; } else if (QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN == type) { @@ -4633,7 +4432,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo SSDataBlock* pResBlock = createResDataBlock(pDescNode); int32_t numOfOutputCols = 0; - SArray* colList = extractColMatchInfo(pScanNode->pScanCols, pDescNode, &numOfOutputCols, pTaskInfo, COL_MATCH_FROM_COL_ID); + SArray* colList = + extractColMatchInfo(pScanNode->pScanCols, pDescNode, &numOfOutputCols, pTaskInfo, COL_MATCH_FROM_COL_ID); SOperatorInfo* pOperator = createSysTableScanOperatorInfo( pHandle, pResBlock, &pScanNode->tableName, pScanNode->node.pConditions, pSysScanPhyNode->mgmtEpSet, colList, pTaskInfo, pSysScanPhyNode->showRewrite, pSysScanPhyNode->accountId); @@ -4645,8 +4445,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo SSDataBlock* pResBlock = createResDataBlock(pDescNode); - int32_t code = doCreateTableGroup(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableGroupInfo, - queryId, taskId); + int32_t code = getTableList(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableListInfo, pTagCond); if (code != TSDB_CODE_SUCCESS) { return NULL; } @@ -4655,11 +4454,11 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo SExprInfo* pExprInfo = createExprInfo(pScanPhyNode->pScanPseudoCols, NULL, &num); int32_t numOfOutputCols = 0; - SArray* colList = - extractColMatchInfo(pScanPhyNode->pScanPseudoCols, pDescNode, &numOfOutputCols, pTaskInfo, COL_MATCH_FROM_COL_ID); + SArray* colList = extractColMatchInfo(pScanPhyNode->pScanPseudoCols, pDescNode, &numOfOutputCols, 
pTaskInfo, + COL_MATCH_FROM_COL_ID); SOperatorInfo* pOperator = - createTagScanOperatorInfo(pHandle, pExprInfo, num, pResBlock, colList, pTableGroupInfo, pTaskInfo); + createTagScanOperatorInfo(pHandle, pExprInfo, num, pResBlock, colList, pTableListInfo, pTaskInfo); return pOperator; } else { ASSERT(0); @@ -4672,7 +4471,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo SOperatorInfo** ops = taosMemoryCalloc(size, POINTER_BYTES); for (int32_t i = 0; i < size; ++i) { SPhysiNode* pChildNode = (SPhysiNode*)nodesListGetNode(pPhyNode->pChildren, i); - ops[i] = createOperatorTree(pChildNode, pTaskInfo, pHandle, queryId, taskId, pTableGroupInfo); + ops[i] = createOperatorTree(pChildNode, pTaskInfo, pHandle, queryId, taskId, pTableListInfo, pTagCond); if (ops[i] == NULL) { return NULL; } @@ -4701,10 +4500,10 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo if (pAggNode->pGroupKeys != NULL) { SArray* pColList = extractColumnInfo(pAggNode->pGroupKeys); pOptr = createGroupOperatorInfo(ops[0], pExprInfo, num, pResBlock, pColList, pAggNode->node.pConditions, - pScalarExprInfo, numOfScalarExpr, pTaskInfo, NULL); + pScalarExprInfo, numOfScalarExpr, pTaskInfo); } else { - pOptr = createAggregateOperatorInfo(ops[0], pExprInfo, num, pResBlock, pScalarExprInfo, numOfScalarExpr, - pTaskInfo, pTableGroupInfo); + pOptr = + createAggregateOperatorInfo(ops[0], pExprInfo, num, pResBlock, pScalarExprInfo, numOfScalarExpr, pTaskInfo); } } else if (QUERY_NODE_PHYSICAL_PLAN_INTERVAL == type || QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL == type) { SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode; @@ -4720,11 +4519,11 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo .precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision}; STimeWindowAggSupp as = {.waterMark = pIntervalPhyNode->window.watermark, - .calTrigger = pIntervalPhyNode->window.triggerType}; + .calTrigger = pIntervalPhyNode->window.triggerType, + .maxTs = INT64_MIN}; int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId; - pOptr = createIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, &as, pTableGroupInfo, - pTaskInfo); + pOptr = createIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, &as, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_SORT == type) { SSortPhysiNode* pSortPhyNode = (SSortPhysiNode*)pPhyNode; @@ -4738,7 +4537,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo SExprInfo* pExprInfo = createExprInfo(pSortPhyNode->pExprs, NULL, &numOfCols); int32_t numOfOutputCols = 0; - SArray* pColList = extractColMatchInfo(pSortPhyNode->pTargets, pDescNode, &numOfOutputCols, pTaskInfo, COL_MATCH_FROM_SLOT_ID); + SArray* pColList = + extractColMatchInfo(pSortPhyNode->pTargets, pDescNode, &numOfOutputCols, pTaskInfo, COL_MATCH_FROM_SLOT_ID); pOptr = createSortOperatorInfo(ops[0], pResBlock, info, pExprInfo, numOfCols, pColList, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW == type) { @@ -4753,13 +4553,26 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo pOptr = createSessionAggOperatorInfo(ops[0], pExprInfo, num, pResBlock, pSessionNode->gap, tsSlotId, &as, pTaskInfo); + } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW == type) { + SSessionWinodwPhysiNode* pSessionNode = (SSessionWinodwPhysiNode*)pPhyNode; + + STimeWindowAggSupp as = {.waterMark = 
pSessionNode->window.watermark, + .calTrigger = pSessionNode->window.triggerType}; + + SExprInfo* pExprInfo = createExprInfo(pSessionNode->window.pFuncs, NULL, &num); + SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc); + int32_t tsSlotId = ((SColumnNode*)pSessionNode->window.pTspk)->slotId; + + pOptr = createStreamSessionAggOperatorInfo(ops[0], pExprInfo, num, pResBlock, pSessionNode->gap, tsSlotId, &as, + pTaskInfo); + } else if (QUERY_NODE_PHYSICAL_PLAN_PARTITION == type) { SPartitionPhysiNode* pPartNode = (SPartitionPhysiNode*)pPhyNode; SArray* pColList = extractPartitionColInfo(pPartNode->pPartitionKeys); SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc); SExprInfo* pExprInfo = createExprInfo(pPartNode->pTargets, NULL, &num); - pOptr = createPartitionOperatorInfo(ops[0], pExprInfo, num, pResBlock, pColList, pTaskInfo, NULL); + pOptr = createPartitionOperatorInfo(ops[0], pExprInfo, num, pResBlock, pColList, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW == type) { SStateWinodwPhysiNode* pStateNode = (SStateWinodwPhysiNode*)pPhyNode; @@ -4770,7 +4583,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo int32_t tsSlotId = ((SColumnNode*)pStateNode->window.pTspk)->slotId; SColumnNode* pColNode = (SColumnNode*)((STargetNode*)pStateNode->pStateKey)->pExpr; - SColumn col = extractColumnFromColumnNode(pColNode); + SColumn col = extractColumnFromColumnNode(pColNode); pOptr = createStatewindowOperatorInfo(ops[0], pExprInfo, num, pResBlock, &as, tsSlotId, &col, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_JOIN == type) { SJoinPhysiNode* pJoinNode = (SJoinPhysiNode*)pPhyNode; @@ -4794,6 +4607,18 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo return pOptr; } +int32_t compareTimeWindow(const void* p1, const void* p2, const void* param) { + const SQueryTableDataCond* pCond = param; + const STimeWindow* pWin1 = p1; + const STimeWindow* pWin2 = p2; + if (pCond->order == TSDB_ORDER_ASC) { + return pWin1->skey - pWin2->skey; + } else if (pCond->order == TSDB_ORDER_DESC) { + return pWin2->skey - pWin1->skey; + } + return 0; +} + int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysiNode* pTableScanNode) { pCond->loadExternalRows = false; @@ -4805,16 +4630,30 @@ int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysi return terrno; } - pCond->twindow = pTableScanNode->scanRange; + // pCond->twindow = pTableScanNode->scanRange; + // TODO: get it from stable scan node + pCond->numOfTWindows = 1; + pCond->twindows = taosMemoryCalloc(pCond->numOfTWindows, sizeof(STimeWindow)); + pCond->twindows[0] = pTableScanNode->scanRange; #if 1 // todo work around a problem, remove it later - if ((pCond->order == TSDB_ORDER_ASC && pCond->twindow.skey > pCond->twindow.ekey) || - (pCond->order == TSDB_ORDER_DESC && pCond->twindow.skey < pCond->twindow.ekey)) { - TSWAP(pCond->twindow.skey, pCond->twindow.ekey); + for (int32_t i = 0; i < pCond->numOfTWindows; ++i) { + if ((pCond->order == TSDB_ORDER_ASC && pCond->twindows[i].skey > pCond->twindows[i].ekey) || + (pCond->order == TSDB_ORDER_DESC && pCond->twindows[i].skey < pCond->twindows[i].ekey)) { + TSWAP(pCond->twindows[i].skey, pCond->twindows[i].ekey); + } } #endif + for (int32_t i = 0; i < pCond->numOfTWindows; ++i) { + if ((pCond->order == TSDB_ORDER_ASC && pCond->twindows[i].skey > pCond->twindows[i].ekey) || + (pCond->order == TSDB_ORDER_DESC && pCond->twindows[i].skey < 
pCond->twindows[i].ekey)) {
+      TSWAP(pCond->twindows[i].skey, pCond->twindows[i].ekey);
+    }
+  }
+  taosqsort(pCond->twindows, pCond->numOfTWindows, sizeof(STimeWindow), pCond, compareTimeWindow);
+
   pCond->type = BLOCK_LOAD_OFFSET_SEQ_ORDER;
   // pCond->type = pTableScanNode->scanFlag;
 
@@ -4838,11 +4677,11 @@ int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysi
 
 SColumn extractColumnFromColumnNode(SColumnNode* pColNode) {
   SColumn c = {0};
-  c.slotId = pColNode->slotId;
-  c.colId = pColNode->colId;
-  c.type = pColNode->node.resType.type;
-  c.bytes = pColNode->node.resType.bytes;
-  c.scale = pColNode->node.resType.scale;
+  c.slotId    = pColNode->slotId;
+  c.colId     = pColNode->colId;
+  c.type      = pColNode->node.resType.type;
+  c.bytes     = pColNode->node.resType.bytes;
+  c.scale     = pColNode->node.resType.scale;
   c.precision = pColNode->node.resType.precision;
   return c;
 }
 
@@ -4973,46 +4812,62 @@ SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNod
   return pList;
 }
 
-int32_t doCreateTableGroup(void* metaHandle, int32_t tableType, uint64_t tableUid, STableGroupInfo* pGroupInfo,
-                           uint64_t queryId, uint64_t taskId) {
-  int32_t code = 0;
+int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo,
+                     SNode* pTagCond) {
+  int32_t code = TSDB_CODE_SUCCESS;
+  pListInfo->pTableList = taosArrayInit(8, sizeof(STableKeyInfo));
+
   if (tableType == TSDB_SUPER_TABLE) {
-    code = tsdbQuerySTableByTagCond(metaHandle, tableUid, 0, NULL, 0, 0, NULL, pGroupInfo, NULL, 0, queryId, taskId);
+    if (pTagCond) {
+      SIndexMetaArg metaArg = {.metaHandle = tsdbGetIdx(metaHandle), .suid = tableUid};
+
+      SArray* res = taosArrayInit(8, sizeof(uint64_t));
+      code = doFilterTag(pTagCond, &metaArg, res);
+      if (code != TSDB_CODE_SUCCESS) {
+        qError("failed to get tableIds, reason: %s, suid: %" PRIu64 "", tstrerror(code), tableUid);
+        taosArrayDestroy(res);
+        terrno = code;
+        return code;
+      } else {
+        qDebug("success to get tableIds, size: %d, suid: %" PRIu64 "", (int)taosArrayGetSize(res), tableUid);
+      }
+      for (int i = 0; i < taosArrayGetSize(res); i++) {
+        STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, .uid = *(uint64_t*)taosArrayGet(res, i)};
+        taosArrayPush(pListInfo->pTableList, &info);
+      }
+      taosArrayDestroy(res);
+    } else {
+      code = tsdbGetAllTableList(metaHandle, tableUid, pListInfo->pTableList);
+    }
   } else {  // Create one table group.
-    code = tsdbGetOneTableGroup(metaHandle, tableUid, 0, pGroupInfo);
+    STableKeyInfo info = {.lastKey = 0, .uid = tableUid};
+    taosArrayPush(pListInfo->pTableList, &info);
   }
 
   return code;
 }
 
-SArray* extractTableIdList(const STableGroupInfo* pTableGroupInfo) {
+SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) {
   SArray* tableIdList = taosArrayInit(4, sizeof(uint64_t));
 
-  if (pTableGroupInfo->numOfTables > 0) {
-    SArray* pa = taosArrayGetP(pTableGroupInfo->pGroupList, 0);
-    ASSERT(taosArrayGetSize(pTableGroupInfo->pGroupList) == 1);
-
-    // Transfer the Array of STableKeyInfo into uid list.
-    size_t numOfTables = taosArrayGetSize(pa);
-    for (int32_t i = 0; i < numOfTables; ++i) {
-      STableKeyInfo* pkeyInfo = taosArrayGet(pa, i);
-      taosArrayPush(tableIdList, &pkeyInfo->uid);
-    }
+  // Transfer the Array of STableKeyInfo into uid list.
+ for (int32_t i = 0; i < taosArrayGetSize(pTableGroupInfo->pTableList); ++i) { + STableKeyInfo* pkeyInfo = taosArrayGet(pTableGroupInfo->pTableList, i); + taosArrayPush(tableIdList, &pkeyInfo->uid); } return tableIdList; } tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, - STableGroupInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId) { - uint64_t uid = pTableScanNode->scan.uid; - int32_t code = - doCreateTableGroup(pHandle->meta, pTableScanNode->scan.tableType, uid, pTableGroupInfo, queryId, taskId); + STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId, SNode* pTagCond) { + int32_t code = + getTableList(pHandle->meta, pTableScanNode->scan.tableType, pTableScanNode->scan.uid, pTableListInfo, pTagCond); if (code != TSDB_CODE_SUCCESS) { goto _error; } - if (pTableGroupInfo->numOfTables == 0) { + if (taosArrayGetSize(pTableListInfo->pTableList) == 0) { code = 0; qDebug("no table qualified for query, TID:0x%" PRIx64 ", QID:0x%" PRIx64, taskId, queryId); goto _error; @@ -5024,13 +4879,100 @@ tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* goto _error; } - return tsdbQueryTables(pHandle->vnode, &cond, pTableGroupInfo, queryId, taskId); + return tsdbQueryTables(pHandle->vnode, &cond, pTableListInfo, queryId, taskId); _error: terrno = code; return NULL; } +int32_t encodeOperator(SOperatorInfo* ops, char** result, int32_t* length) { + int32_t code = TDB_CODE_SUCCESS; + char* pCurrent = NULL; + int32_t currLength = 0; + if (ops->fpSet.encodeResultRow) { + if (result == NULL || length == NULL) { + return TSDB_CODE_TSC_INVALID_INPUT; + } + code = ops->fpSet.encodeResultRow(ops, &pCurrent, &currLength); + + if (code != TDB_CODE_SUCCESS) { + if (*result != NULL) { + taosMemoryFree(*result); + *result = NULL; + } + return code; + } + + if (*result == NULL) { + *result = (char*)taosMemoryCalloc(1, currLength + sizeof(int32_t)); + if (*result == NULL) { + taosMemoryFree(pCurrent); + return TSDB_CODE_OUT_OF_MEMORY; + } + memcpy(*result + sizeof(int32_t), pCurrent, currLength); + *(int32_t*)(*result) = currLength + sizeof(int32_t); + } else { + int32_t sizePre = *(int32_t*)(*result); + char* tmp = (char*)taosMemoryRealloc(*result, sizePre + currLength); + if (tmp == NULL) { + taosMemoryFree(pCurrent); + taosMemoryFree(*result); + *result = NULL; + return TSDB_CODE_OUT_OF_MEMORY; + } + *result = tmp; + memcpy(*result + sizePre, pCurrent, currLength); + *(int32_t*)(*result) += currLength; + } + taosMemoryFree(pCurrent); + *length = *(int32_t*)(*result); + } + + for (int32_t i = 0; i < ops->numOfDownstream; ++i) { + code = encodeOperator(ops->pDownstream[i], result, length); + if (code != TDB_CODE_SUCCESS) { + return code; + } + } + return TDB_CODE_SUCCESS; +} + +int32_t decodeOperator(SOperatorInfo* ops, char* result, int32_t length) { + int32_t code = TDB_CODE_SUCCESS; + if (ops->fpSet.decodeResultRow) { + if (result == NULL) { + return TSDB_CODE_TSC_INVALID_INPUT; + } + ASSERT(length == *(int32_t*)result); + char* data = result + sizeof(int32_t); + code = ops->fpSet.decodeResultRow(ops, data); + if (code != TDB_CODE_SUCCESS) { + return code; + } + + int32_t totalLength = *(int32_t*)result; + int32_t dataLength = *(int32_t*)data; + + if (totalLength == dataLength + sizeof(int32_t)) { // the last data + result = NULL; + length = 0; + } else { + result += dataLength; + *(int32_t*)(result) = totalLength - dataLength; + length = totalLength - dataLength; + } + } + + for (int32_t i = 0; i < ops->numOfDownstream; ++i) { 
+ code = decodeOperator(ops->pDownstream[i], result, length); + if (code != TDB_CODE_SUCCESS) { + return code; + } + } + return TDB_CODE_SUCCESS; +} + int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId, EOPTR_EXEC_MODEL model) { uint64_t queryId = pPlan->id.queryId; @@ -5042,8 +4984,8 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead goto _complete; } - (*pTaskInfo)->pRoot = - createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, queryId, taskId, &(*pTaskInfo)->tableqinfoGroupInfo); + (*pTaskInfo)->pRoot = createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, queryId, taskId, + &(*pTaskInfo)->tableqinfoList, pPlan->pTagCond); if (NULL == (*pTaskInfo)->pRoot) { code = terrno; goto _complete; @@ -5102,34 +5044,18 @@ void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters) { taosMemoryFree(pFilter); } -static void doDestroyTableQueryInfo(STableGroupInfo* pTableqinfoGroupInfo) { - if (pTableqinfoGroupInfo->pGroupList != NULL) { - int32_t numOfGroups = (int32_t)taosArrayGetSize(pTableqinfoGroupInfo->pGroupList); - for (int32_t i = 0; i < numOfGroups; ++i) { - SArray* p = taosArrayGetP(pTableqinfoGroupInfo->pGroupList, i); - - size_t num = taosArrayGetSize(p); - for (int32_t j = 0; j < num; ++j) { - STableQueryInfo* item = taosArrayGetP(p, j); - destroyTableQueryInfoImpl(item); - } - - taosArrayDestroy(p); - } - } - - taosArrayDestroy(pTableqinfoGroupInfo->pGroupList); - taosHashCleanup(pTableqinfoGroupInfo->map); +static void doDestroyTableList(STableListInfo* pTableqinfoList) { + taosArrayDestroy(pTableqinfoList->pTableList); + taosHashCleanup(pTableqinfoList->map); - pTableqinfoGroupInfo->pGroupList = NULL; - pTableqinfoGroupInfo->map = NULL; - pTableqinfoGroupInfo->numOfTables = 0; + pTableqinfoList->pTableList = NULL; + pTableqinfoList->map = NULL; } void doDestroyTask(SExecTaskInfo* pTaskInfo) { qDebug("%s execTask is freed", GET_TASKID(pTaskInfo)); - doDestroyTableQueryInfo(&pTaskInfo->tableqinfoGroupInfo); + doDestroyTableList(&pTaskInfo->tableqinfoList); destroyOperatorInfo(pTaskInfo->pRoot); // taosArrayDestroy(pTaskInfo->summary.queryProfEvents); // taosHashCleanup(pTaskInfo->summary.operatorProfResults); @@ -5213,16 +5139,21 @@ int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo } } - (*pRes)[*resNum].numOfRows = operatorInfo->resultInfo.totalRows; - (*pRes)[*resNum].startupCost = operatorInfo->cost.openCost; - (*pRes)[*resNum].totalCost = operatorInfo->cost.totalCost; + SExplainExecInfo* pInfo = &(*pRes)[*resNum]; + + pInfo->numOfRows = operatorInfo->resultInfo.totalRows; + pInfo->startupCost = operatorInfo->cost.openCost; + pInfo->totalCost = operatorInfo->cost.totalCost; if (operatorInfo->fpSet.getExplainFn) { - int32_t code = (*operatorInfo->fpSet.getExplainFn)(operatorInfo, &(*pRes)->verboseInfo); + int32_t code = operatorInfo->fpSet.getExplainFn(operatorInfo, &pInfo->verboseInfo, &pInfo->verboseLen); if (code) { - qError("operator getExplainFn failed, error:%s", tstrerror(code)); + qError("%s operator getExplainFn failed, code:%s", GET_TASKID(operatorInfo->pTaskInfo), tstrerror(code)); return code; } + } else { + pInfo->verboseLen = 0; + pInfo->verboseInfo = NULL; } ++(*resNum); @@ -5239,15 +5170,22 @@ int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo return TSDB_CODE_SUCCESS; } -int32_t initCatchSupporter(SCatchSupporter* pCatchSup, size_t rowSize, size_t keyBufSize, - const char* pKey, const char* pDir) 
{ - pCatchSup->keySize = sizeof(int64_t) + sizeof(int64_t) + sizeof(TSKEY); - pCatchSup->pKeyBuf = taosMemoryCalloc(1, pCatchSup->keySize); - int32_t pageSize = rowSize * 32; - int32_t bufSize = pageSize * 4096; - createDiskbasedBuf(&pCatchSup->pDataBuf, pageSize, bufSize, pKey, pDir); - _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); - pCatchSup->pWindowHashTable = taosHashInit(10000, hashFn, true, HASH_NO_LOCK);; - return TSDB_CODE_SUCCESS; -} +int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey) { + pSup->keySize = sizeof(int64_t) + sizeof(TSKEY); + pSup->pKeyBuf = taosMemoryCalloc(1, pSup->keySize); + pSup->pResultRows = taosArrayInit(1024, sizeof(SResultWindowInfo)); + if (pSup->pKeyBuf == NULL || pSup->pResultRows == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + int32_t pageSize = 4096; + while (pageSize < pSup->resultRowSize * 4) { + pageSize <<= 1u; + } + // at least four pages need to be in buffer + int32_t bufSize = 4096 * 256; + if (bufSize <= pageSize) { + bufSize = pageSize * 4; + } + return createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, pKey, TD_TMP_DIR_PATH); +} diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index 7606374cdbc1572fbf15112c41d24ca0ac7e2532..8c3a0c0e6e712ad07a381b3baa709b095ba955fb 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -110,9 +110,11 @@ static bool groupKeyCompare(SArray* pGroupCols, SArray* pGroupColVals, SSDataBlo return true; } -static void recordNewGroupKeys(SArray* pGroupCols, SArray* pGroupColVals, SSDataBlock* pBlock, int32_t rowIndex, int32_t numOfGroupCols) { +static void recordNewGroupKeys(SArray* pGroupCols, SArray* pGroupColVals, SSDataBlock* pBlock, int32_t rowIndex) { SColumnDataAgg* pColAgg = NULL; + size_t numOfGroupCols = taosArrayGetSize(pGroupCols); + for (int32_t i = 0; i < numOfGroupCols; ++i) { SColumn* pCol = taosArrayGet(pGroupCols, i); SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pCol->slotId); @@ -208,7 +210,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) { for (int32_t j = 0; j < pBlock->info.rows; ++j) { // Compare with the previous row of this column, and do not set the output buffer again if they are identical. 
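Review note on buffer sizing: the patch factors the page/buffer sizing rule into the new getBufferPgSize() helper and uses it for the aggregation and partition operators, yet initStreamAggSupporter() in the hunk above re-implements the same doubling loop inline; calling the shared helper there would keep the two from drifting. A standalone rendering of the rule (TDengine types omitted):

#include <stdint.h>
#include <stdio.h>

/* The sizing rule introduced by getBufferPgSize(): a page must hold at
 * least four result rows (doubling from 4 KiB), and the buffer must hold
 * at least four pages (default 1 MiB). */
static void demoBufferPgSize(int32_t rowSize, uint32_t* pgsz, uint32_t* bufsz) {
  *pgsz = 4096;
  while (*pgsz < (uint32_t)(rowSize * 4)) {
    *pgsz <<= 1u;
  }
  *bufsz = 4096 * 256;
  if (*bufsz <= *pgsz) {
    *bufsz = (*pgsz) * 4;
  }
}

int main(void) {
  uint32_t pgsz = 0, bufsz = 0;
  demoBufferPgSize(3000, &pgsz, &bufsz);
  printf("rowSize=3000 -> pgsz=%u bufsz=%u\n", pgsz, bufsz);  // 16384, 1048576
  return 0;
}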
if (!pInfo->isInit) { - recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j, numOfGroupCols); + recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j); pInfo->isInit = true; num++; continue; @@ -223,7 +225,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) { // The first row of a new block does not belongs to the previous existed group if (j == 0) { num++; - recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j, numOfGroupCols); + recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j); continue; } @@ -238,7 +240,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) { // assign the group keys or user input constant values if required doAssignGroupKeys(pCtx, pOperator->numOfExprs, pBlock->info.rows, rowIndex); - recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j, numOfGroupCols); + recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j); num = 1; } @@ -269,25 +271,35 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) { if (pOperator->status == OP_RES_TO_RETURN) { doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); - if (pRes->info.rows == 0 || !hashRemainDataInGroupInfo(&pInfo->groupResInfo)) { - pOperator->status = OP_EXEC_DONE; + + size_t rows = pRes->info.rows; + if (rows == 0 || !hashRemainDataInGroupInfo(&pInfo->groupResInfo)) { + doSetOperatorCompleted(pOperator); } + + pOperator->resultInfo.totalRows += rows; return (pRes->info.rows == 0)? NULL:pRes; } - int32_t order = TSDB_ORDER_ASC; + int32_t order = TSDB_ORDER_ASC; + int32_t scanFlag = MAIN_SCAN; + + int64_t st = taosGetTimestampUs(); SOperatorInfo* downstream = pOperator->pDownstream[0]; while (1) { - publishOperatorProfEvent(downstream, QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); - publishOperatorProfEvent(downstream, QUERY_PROF_AFTER_OPERATOR_EXEC); if (pBlock == NULL) { break; } + int32_t code = getTableScanInfo(pOperator, &order, &scanFlag); + if (code != TSDB_CODE_SUCCESS) { + longjmp(pTaskInfo->env, code); + } + // the pDataBlock are always the same one, no need to call this again - setInputDataBlock(pOperator, pInfo->binfo.pCtx, pBlock, order, MAIN_SCAN, true); + setInputDataBlock(pOperator, pInfo->binfo.pCtx, pBlock, order, scanFlag, true); // there is an scalar expression that needs to be calculated right before apply the group aggregation. 
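Review note on the cost accounting that recurs throughout this patch (the exchange, aggregate, project, partition, and the group-by operator in the surrounding hunks all gain it): each operator samples taosGetTimestampUs() around its open phase, stores the elapsed time in cost.openCost in milliseconds, and accumulates every block it returns into resultInfo.totalRows, which getOperatorExplainExecInfo later reports. A minimal standalone sketch of the pattern, with stand-in names:

#include <stdint.h>
#include <stddef.h>
#include <time.h>

typedef struct {
  double  openCost;   // ms spent in the open phase
  int64_t totalRows;  // rows handed upstream so far
} DemoOpCost;

static int64_t demoNowUs(void) {  // stand-in for taosGetTimestampUs()
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

static void demoOpen(DemoOpCost* cost) {
  int64_t st = demoNowUs();
  /* ... operator-specific open work ... */
  cost->openCost = (demoNowUs() - st) / 1000.0;
}

static const void* demoReturnBlock(DemoOpCost* cost, const void* block, int64_t rows) {
  cost->totalRows += rows;            // accumulated on every return path
  return (rows == 0) ? NULL : block;  // NULL signals end-of-data upstream
}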
if (pInfo->pScalarExprInfo != NULL) { @@ -297,7 +309,6 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) { } } - // setTagValue(pOperator, pRuntimeEnv->current->pTable, pInfo->binfo.pCtx, pOperator->numOfExprs); doHashGroupbyAgg(pOperator, pBlock); } @@ -309,17 +320,32 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) { // updateNumOfRowsInResultRows(pInfo->binfo.pCtx, pOperator->numOfExprs, &pInfo->binfo.resultRowInfo, // pInfo->binfo.rowCellInfoOffset); // } - +#if 0 + if(pOperator->fpSet.encodeResultRow){ + char *result = NULL; + int32_t length = 0; + pOperator->fpSet.encodeResultRow(pOperator, &result, &length); + SAggSupporter* pSup = &pInfo->aggSup; + taosHashClear(pSup->pResultRowHashTable); + pInfo->binfo.resultRowInfo.size = 0; + pOperator->fpSet.decodeResultRow(pOperator, result); + if(result){ + taosMemoryFree(result); + } + } +#endif blockDataEnsureCapacity(pRes, pOperator->resultInfo.capacity); initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, 0); + pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0; + while(1) { doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); doFilter(pInfo->pCondition, pRes, NULL); bool hasRemain = hashRemainDataInGroupInfo(&pInfo->groupResInfo); if (!hasRemain) { - pOperator->status = OP_EXEC_DONE; + doSetOperatorCompleted(pOperator); break; } @@ -328,11 +354,14 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) { } } - return (pRes->info.rows == 0)? NULL:pRes; + size_t rows = pRes->info.rows; + pOperator->resultInfo.totalRows += rows; + + return (rows == 0)? NULL:pRes; } SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock, SArray* pGroupColList, - SNode* pCondition, SExprInfo* pScalarExprInfo, int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo, const STableGroupInfo* pTableGroupInfo) { + SNode* pCondition, SExprInfo* pScalarExprInfo, int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo) { SGroupbyOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SGroupbyOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -382,7 +411,7 @@ static void doHashPartition(SOperatorInfo* pOperator, SSDataBlock* pBlock) { int32_t numOfGroupCols = taosArrayGetSize(pInfo->pGroupCols); for (int32_t j = 0; j < pBlock->info.rows; ++j) { - recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j, numOfGroupCols); + recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j); int32_t len = buildGroupKeys(pInfo->keyBuf, pInfo->pGroupColVals); SDataGroupInfo* pGInfo = NULL; @@ -425,7 +454,6 @@ static void doHashPartition(SOperatorInfo* pOperator, SSDataBlock* pBlock) { memcpy(data + (*columnLen), src, varDataTLen(src)); int32_t v = (data + (*columnLen) + varDataTLen(src) - (char*)pPage); ASSERT(v > 0); - printf("len:%d\n", v); contentLen = varDataTLen(src); } @@ -476,16 +504,13 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf int32_t *rows = (int32_t*) pPage; if (*rows >= pInfo->rowCapacity) { + // release buffer + releaseBufPage(pInfo->pBuf, pPage); + // add a new page for current group int32_t pageId = 0; pPage = getNewBufPage(pInfo->pBuf, 0, &pageId); taosArrayPush(p->pPageList, &pageId); - -// // number of rows -// *(int32_t*) pPage = 0; -// -// uint64_t* groupId = (pPage + sizeof(int32_t)); -// *groupId = 0; memset(pPage, 0, 
getBufPageSize(pInfo->pBuf)); } } @@ -538,7 +563,7 @@ static SSDataBlock* buildPartitionResult(SOperatorInfo* pOperator) { // try next group data pInfo->pGroupIter = taosHashIterate(pInfo->pGroupSet, pInfo->pGroupIter); if (pInfo->pGroupIter == NULL) { - pOperator->status = OP_EXEC_DONE; + doSetOperatorCompleted(pOperator); return NULL; } @@ -552,9 +577,12 @@ static SSDataBlock* buildPartitionResult(SOperatorInfo* pOperator) { blockDataFromBuf1(pInfo->binfo.pRes, page, pInfo->rowCapacity); pInfo->pageIndex += 1; + releaseBufPage(pInfo->pBuf, page); blockDataUpdateTsWindow(pInfo->binfo.pRes, 0); pInfo->binfo.pRes->info.groupId = pGroupInfo->groupId; + + pOperator->resultInfo.totalRows += pInfo->binfo.pRes->info.rows; return pInfo->binfo.pRes; } @@ -571,12 +599,11 @@ static SSDataBlock* hashPartition(SOperatorInfo* pOperator) { return buildPartitionResult(pOperator); } + int64_t st = taosGetTimestampUs(); SOperatorInfo* downstream = pOperator->pDownstream[0]; while (1) { - publishOperatorProfEvent(downstream, QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); - publishOperatorProfEvent(downstream, QUERY_PROF_AFTER_OPERATOR_EXEC); if (pBlock == NULL) { break; } @@ -584,6 +611,8 @@ static SSDataBlock* hashPartition(SOperatorInfo* pOperator) { doHashPartition(pOperator, pBlock); } + pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0; + pOperator->status = OP_RES_TO_RETURN; blockDataEnsureCapacity(pRes, 4096); return buildPartitionResult(pOperator); @@ -599,7 +628,7 @@ static void destroyPartitionOperatorInfo(void* param, int32_t numOfOutput) { } SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock, SArray* pGroupColList, - SExecTaskInfo* pTaskInfo, const STableGroupInfo* pTableGroupInfo) { + SExecTaskInfo* pTaskInfo) { SPartitionOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SPartitionOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -614,7 +643,11 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SExprInfo* goto _error; } - int32_t code = createDiskbasedBuf(&pInfo->pBuf, 4096, 4096 * 256, pTaskInfo->id.str, "/tmp/"); + uint32_t defaultPgsz = 0; + uint32_t defaultBufsz = 0; + getBufferPgSize(pResultBlock->info.rowSize, &defaultPgsz, &defaultBufsz); + + int32_t code = createDiskbasedBuf(&pInfo->pBuf, defaultPgsz, defaultBufsz, pTaskInfo->id.str, TD_TMP_DIR_PATH); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -627,13 +660,14 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SExprInfo* } pOperator->name = "PartitionOperator"; - pOperator->blocking = true; + pOperator->blocking = true; pOperator->status = OP_NOT_OPENED; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_PARTITION; pInfo->binfo.pRes = pResultBlock; - pOperator->numOfExprs = numOfCols; + pOperator->numOfExprs = numOfCols; pOperator->pExpr = pExprInfo; pOperator->info = pInfo; + pOperator->pTaskInfo = pTaskInfo; pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, hashPartition, NULL, NULL, destroyPartitionOperatorInfo, NULL, NULL, NULL); diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c index d7d6d963463bb400f940119d2192b63ddb7de16a..ad9e4d63f0a7475e990fe9f161d419458a5a9cf8 100644 --- a/source/libs/executor/src/joinoperator.c +++ b/source/libs/executor/src/joinoperator.c @@ -98,9 +98,7 @@ SSDataBlock* 
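Editor's note: createPartitionOperatorInfo above replaces the fixed 4096-byte page and 256-page budget with sizes derived from the result row via getBufferPgSize, and moves the spill directory to TD_TMP_DIR_PATH. The exact sizing policy is not shown in this hunk; below is a plausible sketch of such a derivation, where the header slack and the retained 256-page budget are assumptions:

```c
#include <stdint.h>

void bufferPgSize(int32_t rowSize, uint32_t *pgSize, uint32_t *bufSize) {
  uint32_t need = (uint32_t)rowSize + 128;  // row plus assumed per-page header
  uint32_t pg = 4096;                       // never go below the old default
  while (pg < need) {
    pg <<= 1;                               // round up to a power of two
  }
  *pgSize = pg;
  *bufSize = pg * 256;                      // keep the former 256-page budget
}
```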
doMergeJoin(struct SOperatorInfo* pOperator) { // todo extract method if (pJoinInfo->pLeft == NULL || pJoinInfo->leftPos >= pJoinInfo->pLeft->info.rows) { SOperatorInfo* ds1 = pOperator->pDownstream[0]; - publishOperatorProfEvent(ds1, QUERY_PROF_BEFORE_OPERATOR_EXEC); pJoinInfo->pLeft = ds1->fpSet.getNextFn(ds1); - publishOperatorProfEvent(ds1, QUERY_PROF_AFTER_OPERATOR_EXEC); pJoinInfo->leftPos = 0; if (pJoinInfo->pLeft == NULL) { @@ -111,9 +109,7 @@ SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) { if (pJoinInfo->pRight == NULL || pJoinInfo->rightPos >= pJoinInfo->pRight->info.rows) { SOperatorInfo* ds2 = pOperator->pDownstream[1]; - publishOperatorProfEvent(ds2, QUERY_PROF_BEFORE_OPERATOR_EXEC); pJoinInfo->pRight = ds2->fpSet.getNextFn(ds2); - publishOperatorProfEvent(ds2, QUERY_PROF_AFTER_OPERATOR_EXEC); pJoinInfo->rightPos = 0; if (pJoinInfo->pRight == NULL) { diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index d4225caa71d0885e8aba3f4148dec9917d20dbd1..48b6e8b7200c84fb7681b23824417325b53c6347 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -13,8 +13,8 @@ * along with this program. If not, see . */ -#include "function.h" #include "filter.h" +#include "function.h" #include "functionMgt.h" #include "os.h" #include "querynodes.h" @@ -142,7 +142,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn return true; } - while(1) { + while (1) { getNextTimeWindow(pInterval, &w, order); if (w.ekey < pBlockInfo->window.skey) { break; @@ -158,7 +158,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn return false; } -static void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock); +static void addTagPseudoColumnData(SReadHandle *pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr, SSDataBlock* pBlock); static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableScanInfo, SSDataBlock* pBlock, uint32_t* status) { @@ -190,7 +190,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca pCost->skipBlocks += 1; // clear all data in pBlock that are set when handing the previous block - for(int32_t i = 0; i < pBlockInfo->numOfCols; ++i) { + for (int32_t i = 0; i < pBlockInfo->numOfCols; ++i) { SColumnInfoData* pcol = taosArrayGet(pBlock->pDataBlock, i); pcol->pData = NULL; } @@ -250,12 +250,15 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca // currently only the tbname pseudo column if (pTableScanInfo->numOfPseudoExpr > 0) { - addTagPseudoColumnData(pTableScanInfo, pBlock); + addTagPseudoColumnData(&pTableScanInfo->readHandle, pTableScanInfo->pPseudoExpr, pTableScanInfo->numOfPseudoExpr, pBlock); } - // todo record the filter time cost + int64_t st = taosGetTimestampMs(); doFilter(pTableScanInfo->pFilterNode, pBlock, pTableScanInfo->pColMatchInfo); + int64_t et = taosGetTimestampMs(); + pTableScanInfo->readRecorder.filterTime += (et - st); + if (pBlock->info.rows == 0) { pCost->filterOutBlocks += 1; qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo), @@ -271,23 +274,31 @@ static void prepareForDescendingScan(STableScanInfo* pTableScanInfo, SqlFunction switchCtxOrder(pCtx, numOfOutput); // setupQueryRangeForReverseScan(pTableScanInfo); - STimeWindow* pTWindow = &pTableScanInfo->cond.twindow; - TSWAP(pTWindow->skey, pTWindow->ekey); pTableScanInfo->cond.order = TSDB_ORDER_DESC; 
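Editor's note: loadDataBlock above replaces the "todo record the filter time cost" note with real accounting, bracketing the filter call with millisecond timestamps and accumulating the delta in the read recorder. The same wrapper pattern in isolation, with stand-ins for taosGetTimestampMs and doFilter:

```c
#include <stdint.h>

typedef struct { int64_t filterTime; } Recorder;

int64_t nowMs(void);                          /* stand-in for taosGetTimestampMs */
void    runFilter(void *node, void *block);   /* stand-in for doFilter */

void timedFilter(Recorder *rec, void *node, void *block) {
  int64_t st = nowMs();
  runFilter(node, block);
  rec->filterTime += nowMs() - st;   // accumulated across all scanned blocks
}
```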
+ for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) { + STimeWindow* pTWindow = &pTableScanInfo->cond.twindows[i]; + TSWAP(pTWindow->skey, pTWindow->ekey); + } + SQueryTableDataCond *pCond = &pTableScanInfo->cond; + taosqsort(pCond->twindows, + pCond->numOfTWindows, + sizeof(STimeWindow), + pCond, + compareTimeWindow); } -void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock) { +void addTagPseudoColumnData(SReadHandle *pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr, SSDataBlock* pBlock) { // currently only the tbname pseudo column - if (pTableScanInfo->numOfPseudoExpr == 0) { + if (numOfPseudoExpr == 0) { return; } SMetaReader mr = {0}; - metaReaderInit(&mr, pTableScanInfo->readHandle.meta, 0); + metaReaderInit(&mr, pHandle->meta, 0); metaGetTableEntryByUid(&mr, pBlock->info.uid); - for (int32_t j = 0; j < pTableScanInfo->numOfPseudoExpr; ++j) { - SExprInfo* pExpr = &pTableScanInfo->pPseudoExpr[j]; + for (int32_t j = 0; j < numOfPseudoExpr; ++j) { + SExprInfo* pExpr = &pPseudoExpr[j]; int32_t dstSlotId = pExpr->base.resSchema.slotId; @@ -298,26 +309,30 @@ void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock) // this is to handle the tbname if (fmIsScanPseudoColumnFunc(functionId)) { - setTbNameColData(pTableScanInfo->readHandle.meta, pBlock, pColInfoData, functionId); + setTbNameColData(pHandle->meta, pBlock, pColInfoData, functionId); } else { // these are tags const char* p = NULL; - if(pColInfoData->info.type == TSDB_DATA_TYPE_JSON){ - const uint8_t *tmp = mr.me.ctbEntry.pTags; - char *data = taosMemoryCalloc(kvRowLen(tmp) + 1, 1); - if(data == NULL){ + if (pColInfoData->info.type == TSDB_DATA_TYPE_JSON) { + const uint8_t* tmp = mr.me.ctbEntry.pTags; + + char* data = taosMemoryCalloc(kvRowLen(tmp) + 1, 1); + if (data == NULL) { + metaReaderClear(&mr); qError("doTagScan calloc error:%d", kvRowLen(tmp) + 1); return; } + *data = TSDB_DATA_TYPE_JSON; - memcpy(data+1, tmp, kvRowLen(tmp)); + memcpy(data + 1, tmp, kvRowLen(tmp)); p = data; - }else{ + } else { p = metaGetTableTagVal(&mr.me, pExpr->base.pParam[0].pCol->colId); } + for (int32_t i = 0; i < pBlock->info.rows; ++i) { colDataAppend(pColInfoData, i, p, (p == NULL)); } - if(pColInfoData->info.type == TSDB_DATA_TYPE_JSON){ + if (pColInfoData->info.type == TSDB_DATA_TYPE_JSON) { taosMemoryFree((void*)p); } } @@ -335,9 +350,8 @@ void setTbNameColData(void* pMeta, const SSDataBlock* pBlock, SColumnInfoData* p infoData.info.bytes = sizeof(uint64_t); colInfoDataEnsureCapacity(&infoData, 0, 1); - colDataAppendInt64(&infoData, 0, (int64_t*) &pBlock->info.uid); - SScalarParam srcParam = { - .numOfRows = pBlock->info.rows, .param = pMeta, .columnData = &infoData}; + colDataAppendInt64(&infoData, 0, (int64_t*)&pBlock->info.uid); + SScalarParam srcParam = {.numOfRows = pBlock->info.rows, .param = pMeta, .columnData = &infoData}; SScalarParam param = {.columnData = pColInfoData}; fpSet.process(&srcParam, 1, ¶m); @@ -347,6 +361,8 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) { STableScanInfo* pTableScanInfo = pOperator->info; SSDataBlock* pBlock = pTableScanInfo->pResBlock; + int64_t st = taosGetTimestampUs(); + while (tsdbNextDataBlock(pTableScanInfo->dataReader)) { if (isTaskKilled(pOperator->pTaskInfo)) { longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED); @@ -366,9 +382,12 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) { continue; } + pOperator->resultInfo.totalRows = pTableScanInfo->readRecorder.totalRows; + 
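Editor's note: prepareForDescendingScan now flips every window in cond.twindows and re-sorts the whole array, instead of swapping a single twindow. compareTimeWindow is not shown in this diff, so the comparator below is an assumption that orders windows by descending start key:

```c
#include <stdint.h>
#include <stdlib.h>

typedef struct { int64_t skey, ekey; } TimeWindow;

static int cmpWinDesc(const void *a, const void *b) {
  const TimeWindow *w1 = a, *w2 = b;
  return (w1->skey < w2->skey) - (w1->skey > w2->skey);  // larger skey first
}

void prepareDescending(TimeWindow *wins, int32_t n) {
  for (int32_t i = 0; i < n; ++i) {   // flip each window's bounds
    int64_t t = wins[i].skey;
    wins[i].skey = wins[i].ekey;
    wins[i].ekey = t;
  }
  qsort(wins, (size_t)n, sizeof(TimeWindow), cmpWinDesc);
}
```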
pTableScanInfo->readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0; + + pOperator->cost.totalCost = pTableScanInfo->readRecorder.elapsedTime; return pBlock; } - return NULL; } @@ -383,9 +402,15 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { // do the ascending order traverse in the first place. while (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) { - SSDataBlock* p = doTableScanImpl(pOperator); - if (p != NULL) { - return p; + while (pTableScanInfo->curTWinIdx < pTableScanInfo->cond.numOfTWindows) { + SSDataBlock* p = doTableScanImpl(pOperator); + if (p != NULL) { + return p; + } + pTableScanInfo->curTWinIdx += 1; + if (pTableScanInfo->curTWinIdx < pTableScanInfo->cond.numOfTWindows) { + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, pTableScanInfo->curTWinIdx); + } } pTableScanInfo->scanTimes += 1; @@ -393,13 +418,14 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { if (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) { setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED); pTableScanInfo->scanFlag = REPEAT_SCAN; - - STimeWindow* pWin = &pTableScanInfo->cond.twindow; - qDebug("%s start to repeat ascending order scan data blocks due to query func required, qrange:%" PRId64 - "-%" PRId64, GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey); - + qDebug("%s start to repeat ascending order scan data blocks due to query func required", GET_TASKID(pTaskInfo)); + for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) { + STimeWindow* pWin = &pTableScanInfo->cond.twindows[i]; + qDebug("%s\t qrange:%" PRId64 "-%" PRId64, GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey); + } // do prepare for the next round table scan operation - tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond); + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0); + pTableScanInfo->curTWinIdx = 0; } } @@ -407,31 +433,40 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { if (pTableScanInfo->scanTimes < total) { if (pTableScanInfo->cond.order == TSDB_ORDER_ASC) { prepareForDescendingScan(pTableScanInfo, pTableScanInfo->pCtx, pTableScanInfo->numOfOutput); - tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond); + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0); + pTableScanInfo->curTWinIdx = 0; } - STimeWindow* pWin = &pTableScanInfo->cond.twindow; - qDebug("%s start to descending order scan data blocks due to query func required, qrange:%" PRId64 "-%" PRId64, - GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey); - + qDebug("%s start to descending order scan data blocks due to query func required", GET_TASKID(pTaskInfo)); + for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) { + STimeWindow* pWin = &pTableScanInfo->cond.twindows[i]; + qDebug("%s\t qrange:%" PRId64 "-%" PRId64, GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey); + } while (pTableScanInfo->scanTimes < total) { - SSDataBlock* p = doTableScanImpl(pOperator); - if (p != NULL) { - return p; + while (pTableScanInfo->curTWinIdx < pTableScanInfo->cond.numOfTWindows) { + SSDataBlock* p = doTableScanImpl(pOperator); + if (p != NULL) { + return p; + } + pTableScanInfo->curTWinIdx += 1; + if (pTableScanInfo->curTWinIdx < pTableScanInfo->cond.numOfTWindows) { + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, pTableScanInfo->curTWinIdx); + } } pTableScanInfo->scanTimes += 1; - if (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) { + if 
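Editor's note: doTableScan above wraps doTableScanImpl in an inner loop over cond.numOfTWindows: when one window is exhausted, curTWinIdx advances and tsdbResetReadHandle re-seeds the reader on the next window. The control flow on its own, with stand-in reader APIs:

```c
#include <stddef.h>

typedef struct Block  Block;
typedef struct Reader Reader;

Block *nextBlock(Reader *r);               /* stand-in for doTableScanImpl */
void   resetReader(Reader *r, int winIdx); /* stand-in for tsdbResetReadHandle */

Block *scanWindows(Reader *r, int *curWin, int numOfWins) {
  while (*curWin < numOfWins) {
    Block *b = nextBlock(r);
    if (b != NULL) {
      return b;                // hand back one block per call
    }
    *curWin += 1;              // current window exhausted
    if (*curWin < numOfWins) {
      resetReader(r, *curWin); // re-seed the reader on the next window
    }
  }
  return NULL;                 // every window consumed for this pass
}
```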
(pTableScanInfo->scanTimes < total) { setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED); pTableScanInfo->scanFlag = REPEAT_SCAN; - qDebug("%s start to repeat descending order scan data blocks due to query func required, qrange:%" PRId64 - "-%" PRId64, - GET_TASKID(pTaskInfo), pTaskInfo->window.skey, pTaskInfo->window.ekey); - - // do prepare for the next round table scan operation - tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond); + qDebug("%s start to repeat descending order scan data blocks due to query func required", GET_TASKID(pTaskInfo)); + for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) { + STimeWindow* pWin = &pTableScanInfo->cond.twindows[i]; + qDebug("%s\t qrange:%" PRId64 "-%" PRId64, GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey); + } + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0); + pTableScanInfo->curTWinIdx = 0; } } } @@ -452,6 +487,15 @@ SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode) { return interval; } +static int32_t getTableScannerExecInfo(struct SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len) { + SFileBlockLoadRecorder* pRecorder = taosMemoryCalloc(1, sizeof(SFileBlockLoadRecorder)); + STableScanInfo* pTableScanInfo = pOptr->info; + *pRecorder = pTableScanInfo->readRecorder; + *pOptrExplain = pRecorder; + *len = sizeof(SFileBlockLoadRecorder); + return 0; +} + static void destroyTableScanOperatorInfo(void* param, int32_t numOfOutput) { STableScanInfo* pTableScanInfo = (STableScanInfo*)param; taosMemoryFree(pTableScanInfo->pResBlock); @@ -462,7 +506,8 @@ static void destroyTableScanOperatorInfo(void* param, int32_t numOfOutput) { } } -SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, tsdbReaderT pDataReader, SReadHandle* readHandle, SExecTaskInfo* pTaskInfo) { +SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, tsdbReaderT pDataReader, + SReadHandle* readHandle, SExecTaskInfo* pTaskInfo) { STableScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STableScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -476,7 +521,8 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SDataBlockDescNode* pDescNode = pTableScanNode->scan.node.pOutputDataBlockDesc; int32_t numOfCols = 0; - SArray* pColList = extractColMatchInfo(pTableScanNode->scan.pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID); + SArray* pColList = + extractColMatchInfo(pTableScanNode->scan.pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID); int32_t code = initQueryTableDataCond(&pInfo->cond, pTableScanNode); if (code != TSDB_CODE_SUCCESS) { @@ -485,38 +531,36 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, if (pTableScanNode->scan.pScanPseudoCols != NULL) { pInfo->pPseudoExpr = createExprInfo(pTableScanNode->scan.pScanPseudoCols, NULL, &pInfo->numOfPseudoExpr); - pInfo->pPseudoCtx = createSqlFunctionCtx(pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, &pInfo->rowCellInfoOffset); + pInfo->pPseudoCtx = createSqlFunctionCtx(pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, &pInfo->rowCellInfoOffset); } pInfo->scanInfo = (SScanInfo){.numOfAsc = pTableScanNode->scanSeq[0], .numOfDesc = pTableScanNode->scanSeq[1]}; -// pInfo->scanInfo = (SScanInfo){.numOfAsc = 0, .numOfDesc = 1}; // for debug purpose +// pInfo->scanInfo = (SScanInfo){.numOfAsc = 0, .numOfDesc = 1}; // for debug purpose - pInfo->readHandle = 
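Editor's note: getTableScannerExecInfo above is the new explain hook wired into createOperatorFpSet below; it hands the caller a heap-allocated snapshot of the scanner's SFileBlockLoadRecorder, so the snapshot's lifetime is decoupled from the operator's. The shape of such a hook, with a simplified stats struct:

```c
#include <stdint.h>
#include <stdlib.h>

typedef struct { int64_t totalRows; int64_t filterTime; } LoadRecorder;

int32_t scannerExecInfo(const LoadRecorder *live, void **pExplain, uint32_t *len) {
  LoadRecorder *copy = calloc(1, sizeof(LoadRecorder));
  if (copy == NULL) {
    return -1;                 // real code would return a proper error code
  }
  *copy = *live;               // snapshot the counters at this instant
  *pExplain = copy;            // ownership transfers to the caller
  *len = (uint32_t)sizeof(LoadRecorder);
  return 0;
}
```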
*readHandle; - pInfo->interval = extractIntervalInfo(pTableScanNode); - pInfo->sampleRatio = pTableScanNode->ratio; + pInfo->readHandle = *readHandle; + pInfo->interval = extractIntervalInfo(pTableScanNode); + pInfo->sampleRatio = pTableScanNode->ratio; pInfo->dataBlockLoadFlag = pTableScanNode->dataRequired; - pInfo->pResBlock = createResDataBlock(pDescNode); - pInfo->pFilterNode = pTableScanNode->scan.node.pConditions; - pInfo->dataReader = pDataReader; - pInfo->scanFlag = MAIN_SCAN; - pInfo->pColMatchInfo = pColList; + pInfo->pResBlock = createResDataBlock(pDescNode); + pInfo->pFilterNode = pTableScanNode->scan.node.pConditions; + pInfo->dataReader = pDataReader; + pInfo->scanFlag = MAIN_SCAN; + pInfo->pColMatchInfo = pColList; + pInfo->curTWinIdx = 0; - pOperator->name = "TableScanOperator"; // for debug purpose + pOperator->name = "TableScanOperator"; // for debug purpose pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN; - pOperator->blocking = false; - pOperator->status = OP_NOT_OPENED; - pOperator->info = pInfo; - pOperator->numOfExprs = numOfCols; - pOperator->pTaskInfo = pTaskInfo; - - pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doTableScan, NULL, NULL, destroyTableScanOperatorInfo, NULL, NULL, NULL); + pOperator->blocking = false; + pOperator->status = OP_NOT_OPENED; + pOperator->info = pInfo; + pOperator->numOfExprs = numOfCols; + pOperator->pTaskInfo = pTaskInfo; - static int32_t cost = 0; + pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doTableScan, NULL, NULL, destroyTableScanOperatorInfo, + NULL, NULL, getTableScannerExecInfo); // for non-blocking operator, the open cost is always 0 pOperator->cost.openCost = 0; - pOperator->cost.totalCost = ++cost; - pOperator->resultInfo.totalRows = ++cost; return pOperator; } @@ -631,20 +675,35 @@ static void doClearBufferedBlocks(SStreamBlockScanInfo* pInfo) { taosArrayClear(pInfo->pBlockLists); } +static bool isSessionWindow(SStreamBlockScanInfo* pInfo) { return pInfo->sessionSup.pStreamAggSup != NULL; } + static bool prepareDataScan(SStreamBlockScanInfo* pInfo) { SSDataBlock* pSDB = pInfo->pUpdateRes; if (pInfo->updateResIndex < pSDB->info.rows) { SColumnInfoData* pColDataInfo = taosArrayGet(pSDB->pDataBlock, 0); - TSKEY *tsCols = (TSKEY*)pColDataInfo->pData; - SResultRowInfo dumyInfo; + TSKEY* tsCols = (TSKEY*)pColDataInfo->pData; + SResultRowInfo dumyInfo; dumyInfo.cur.pageId = -1; - STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[pInfo->updateResIndex], &pInfo->interval, - pInfo->interval.precision, NULL); + STimeWindow win; + if (isSessionWindow(pInfo)) { + SStreamAggSupporter* pAggSup = pInfo->sessionSup.pStreamAggSup; + int64_t gap = pInfo->sessionSup.gap; + int32_t winIndex = 0; + SResultWindowInfo* pCurWin = + getSessionTimeWindow(pAggSup->pResultRows, tsCols[pInfo->updateResIndex], gap, &winIndex); + win = pCurWin->win; + pInfo->updateResIndex += + updateSessionWindowInfo(pCurWin, tsCols, pSDB->info.rows, pInfo->updateResIndex, gap, NULL); + } else { + win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[pInfo->updateResIndex], &pInfo->interval, + pInfo->interval.precision, NULL); + pInfo->updateResIndex += getNumOfRowsInTimeWindow(&pSDB->info, tsCols, pInfo->updateResIndex, win.ekey, + binarySearchForKey, NULL, TSDB_ORDER_ASC); + } STableScanInfo* pTableScanInfo = pInfo->pOperatorDumy->info; - pTableScanInfo->cond.twindow = win; - tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond); - pInfo->updateResIndex += getNumOfRowsInTimeWindow(&pSDB->info, tsCols, 
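Editor's note: prepareDataScan above now distinguishes two rescan shapes. With a stream session supporter attached, the window comes from the stored session windows (getSessionTimeWindow plus updateSessionWindowInfo); otherwise it is the aligned interval bucket from getActiveTimeWindow. A bare sketch of that decision, with both helpers reduced to stand-ins:

```c
#include <stdbool.h>
#include <stdint.h>

typedef struct { int64_t skey, ekey; } Win;

Win sessionWindowFor(int64_t ts, int64_t gap);  /* stand-in */
Win intervalWindowFor(int64_t ts);              /* stand-in */

Win rescanWindow(bool hasSessionSup, int64_t ts, int64_t gap) {
  return hasSessionSup ? sessionWindowFor(ts, gap)  // session bounds grow with the gap
                       : intervalWindowFor(ts);     // fixed-size interval bucket
}
```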
pInfo->updateResIndex, - win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC); + pTableScanInfo->cond.twindows[0] = win; + pTableScanInfo->curTWinIdx = 0; + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0); pTableScanInfo->scanTimes = 0; return true; } else { @@ -679,8 +738,8 @@ static SSDataBlock* getUpdateDataBlock(SStreamBlockScanInfo* pInfo, bool inverti // p->info.type = STREAM_INVERT; // taosArrayClear(pInfo->tsArray); // return p; - SSDataBlock* pDataBlock = createOneDataBlock(pInfo->pRes, false); - SColumnInfoData* pCol = (SColumnInfoData*) taosArrayGet(pDataBlock->pDataBlock, 0); + SSDataBlock* pDataBlock = createOneDataBlock(pInfo->pRes, false); + SColumnInfoData* pCol = (SColumnInfoData*)taosArrayGet(pDataBlock->pDataBlock, 0); ASSERT(pCol->info.type == TSDB_DATA_TYPE_TIMESTAMP); colInfoDataEnsureCapacity(pCol, 0, size); for (int32_t i = 0; i < size; i++) { @@ -696,96 +755,6 @@ static SSDataBlock* getUpdateDataBlock(SStreamBlockScanInfo* pInfo, bool inverti return NULL; } -void static setSupKeyBuf(SCatchSupporter* pSup, int64_t groupId, int64_t childId, TSKEY ts) { - int64_t* pKey = (int64_t*)pSup->pKeyBuf; - pKey[0] = groupId; - pKey[1] = childId; - pKey[2] = ts; -} - -static int32_t catchWidonwInfo(SSDataBlock* pDataBlock, SCatchSupporter* pSup, - int32_t pageId, int32_t tsIndex, int64_t childId) { - SColumnInfoData* pColDataInfo = taosArrayGet(pDataBlock->pDataBlock, tsIndex); - TSKEY* tsCols = (int64_t*)pColDataInfo->pData; - for (int32_t i = 0; i < pDataBlock->info.rows; i++) { - setSupKeyBuf(pSup, pDataBlock->info.groupId, childId, tsCols[i]); - SWindowPosition* p1 = (SWindowPosition*)taosHashGet(pSup->pWindowHashTable, - pSup->pKeyBuf, pSup->keySize); - if (p1 == NULL) { - SWindowPosition pos = {.pageId = pageId, .rowId = i}; - int32_t code = taosHashPut(pSup->pWindowHashTable, pSup->pKeyBuf, pSup->keySize, &pos, - sizeof(SWindowPosition)); - if (code != TSDB_CODE_SUCCESS ) { - return code; - } - } else { - p1->pageId = pageId; - p1->rowId = i; - } - } - return TSDB_CODE_SUCCESS; -} - -static int32_t catchDatablock(SSDataBlock* pDataBlock, SCatchSupporter* pSup, - int32_t tsIndex, int64_t childId) { - int32_t start = 0; - int32_t stop = 0; - int32_t pageSize = getBufPageSize(pSup->pDataBuf); - while(start < pDataBlock->info.rows) { - blockDataSplitRows(pDataBlock, pDataBlock->info.hasVarCol, start, &stop, pageSize); - SSDataBlock* pDB = blockDataExtractBlock(pDataBlock, start, stop - start + 1); - if (pDB == NULL) { - return terrno; - } - int32_t pageId = -1; - void* pPage = getNewBufPage(pSup->pDataBuf, pDataBlock->info.groupId, &pageId); - if (pPage == NULL) { - blockDataDestroy(pDB); - return terrno; - } - int32_t size = blockDataGetSize(pDB) + sizeof(int32_t) + pDB->info.numOfCols * sizeof(int32_t); - assert(size <= pageSize); - blockDataToBuf(pPage, pDB); - setBufPageDirty(pPage, true); - releaseBufPage(pSup->pDataBuf, pPage); - blockDataDestroy(pDB); - start = stop + 1; - int32_t code = catchWidonwInfo(pDB, pSup, pageId, tsIndex, childId); - if (code != TSDB_CODE_SUCCESS ) { - return code; - } - } - return TSDB_CODE_SUCCESS; -} - -static SSDataBlock* getDataFromCatch(SStreamBlockScanInfo* pInfo) { - SSDataBlock* pBlock = pInfo->pUpdateRes; - if (pInfo->updateResIndex < pBlock->info.rows) { - blockDataCleanup(pInfo->pRes); - SCatchSupporter* pCSup = &pInfo->childAggSup; - SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, 0); - TSKEY *tsCols = (TSKEY*)pColDataInfo->pData; - int32_t size = taosArrayGetSize(pInfo->childIds); - for 
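Editor's note: the tail of prepareDataScan above shows the actual re-targeting: the recomputed window becomes twindows[0], the window index and scan count are reset, and the reader is rewound. In isolation, under simplified types:

```c
#include <stdint.h>

typedef struct { int64_t skey, ekey; } Win;
typedef struct Reader Reader;
void resetReader(Reader *r, int winIdx);  /* stand-in for tsdbResetReadHandle */

typedef struct {
  Win     twindows[8];
  int32_t curTWinIdx;
  int32_t scanTimes;
} ScanState;

void retargetScan(ScanState *s, Reader *r, Win w) {
  s->twindows[0] = w;   // only the recomputed window remains relevant
  s->curTWinIdx = 0;
  resetReader(r, 0);    // rewind the reader onto that window
  s->scanTimes = 0;     // restart the asc/desc scan sequence
}
```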
(int32_t i = 0; i < size; i++) { - int64_t id = *(int64_t *)taosArrayGet(pInfo->childIds, i); - setSupKeyBuf(pCSup, pBlock->info.groupId, id, - tsCols[pInfo->updateResIndex]); - SWindowPosition* pos = (SWindowPosition*)taosHashGet(pCSup->pWindowHashTable, - pCSup->pKeyBuf, pCSup->keySize); - void* buf = getBufPage(pCSup->pDataBuf, pos->pageId); - SSDataBlock* pDB = createOneDataBlock(pInfo->pRes, false); - blockDataFromBuf(pDB, buf); - SSDataBlock* pSub = blockDataExtractBlock(pDB, pos->rowId, 1); - blockDataMerge(pInfo->pRes, pSub, NULL); - blockDataDestroy(pDB); - blockDataDestroy(pSub); - } - pInfo->updateResIndex++; - return pInfo->pRes; - } - return NULL; -} - static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { // NOTE: this operator does never check if current status is done or not SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -799,15 +768,6 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { size_t total = taosArrayGetSize(pInfo->pBlockLists); if (pInfo->blockType == STREAM_DATA_TYPE_SSDATA_BLOCK) { - if (pInfo->scanMode == STREAM_SCAN_FROM_UPDATERES) { - SSDataBlock* pDB = getDataFromCatch(pInfo); - if (pDB != NULL) { - return pDB; - } else { - pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; - } - } - if (pInfo->validBlockIndex >= total) { doClearBufferedBlocks(pInfo); pOperator->status = OP_EXEC_DONE; @@ -815,17 +775,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { } int32_t current = pInfo->validBlockIndex++; - SSDataBlock* pBlock = taosArrayGetP(pInfo->pBlockLists, current); - if (pBlock->info.type == STREAM_REPROCESS) { - pInfo->scanMode = STREAM_SCAN_FROM_UPDATERES; - } else { - int32_t code = catchDatablock(pBlock, &pInfo->childAggSup, pInfo->primaryTsIndex, 0); - if (code != TDB_CODE_SUCCESS) { - pTaskInfo->code = code; - longjmp(pTaskInfo->env, code); - } - } - return pBlock; + return taosArrayGetP(pInfo->pBlockLists, current); } else { if (pInfo->scanMode == STREAM_SCAN_FROM_RES) { blockDataDestroy(pInfo->pUpdateRes); @@ -834,6 +784,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { } else if (pInfo->scanMode == STREAM_SCAN_FROM_UPDATERES) { blockDataCleanup(pInfo->pRes); pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER; + prepareDataScan(pInfo); return pInfo->pUpdateRes; } else if (pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER) { SSDataBlock* pSDB = doDataScan(pInfo); @@ -866,8 +817,15 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { pInfo->pRes->info.uid = uid; pInfo->pRes->info.type = STREAM_NORMAL; - int32_t numOfCols = pInfo->pRes->info.numOfCols; - for (int32_t i = 0; i < numOfCols; ++i) { + // for generating rollup SMA result, each time is an independent time serie. 
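Editor's note: with the block-cache ("catch") machinery deleted above, doStreamBlockScan returns buffered SSDataBlocks verbatim, and the remaining scan modes reduce to a small state machine; note that prepareDataScan now runs at the FROM_UPDATERES-to-FROM_DATAREADER transition rather than when the update block is first produced. An illustrative reduction of those transitions, not the literal code:

```c
typedef enum {
  SCAN_FROM_READERHANDLE,  // pull fresh rows from the stream reader
  SCAN_FROM_RES,           // return a previously built result block
  SCAN_FROM_UPDATERES,     // emit the update block, prepare the rescan
  SCAN_FROM_DATAREADER     // replay historical rows for the update range
} ScanMode;

ScanMode nextMode(ScanMode m, int rescanHasMore) {
  switch (m) {
    case SCAN_FROM_RES:        return SCAN_FROM_READERHANDLE;
    case SCAN_FROM_UPDATERES:  return SCAN_FROM_DATAREADER;  // prepareDataScan runs here
    case SCAN_FROM_DATAREADER: return rescanHasMore ? SCAN_FROM_DATAREADER
                                                    : SCAN_FROM_READERHANDLE;
    default:                   return m;
  }
}
```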
+ // TODO temporarily used, when the statement of "partition by tbname" is ready, remove this + if (pInfo->assignBlockUid) { + pInfo->pRes->info.groupId = uid; + } else { + pInfo->pRes->info.groupId = groupId; + } + + for (int32_t i = 0; i < taosArrayGetSize(pInfo->pColMatchInfo); ++i) { SColMatchInfo* pColMatchInfo = taosArrayGet(pInfo->pColMatchInfo, i); if (!pColMatchInfo->output) { continue; @@ -897,26 +855,31 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { pTaskInfo->code = terrno; return NULL; } + rows = pBlockInfo->rows; + + // currently only the tbname pseudo column + if (pInfo->numOfPseudoExpr > 0) { + addTagPseudoColumnData(&pInfo->readHandle, pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, pInfo->pRes); + } + doFilter(pInfo->pCondition, pInfo->pRes, NULL); blockDataUpdateTsWindow(pInfo->pRes, 0); - break; } // record the scan action. pInfo->numOfExec++; - pInfo->numOfRows += pBlockInfo->rows; + pOperator->resultInfo.totalRows += pBlockInfo->rows; if (rows == 0) { pOperator->status = OP_EXEC_DONE; - } else if (pInfo->interval.interval > 0) { - SSDataBlock* upRes = getUpdateDataBlock(pInfo, true); //TODO(liuyao) get invertible from plan + } else if (pInfo->pUpdateInfo) { + SSDataBlock* upRes = getUpdateDataBlock(pInfo, true); // TODO(liuyao) get invertible from plan if (upRes) { pInfo->pUpdateRes = upRes; if (upRes->info.type == STREAM_REPROCESS) { pInfo->updateResIndex = 0; - prepareDataScan(pInfo); pInfo->scanMode = STREAM_SCAN_FROM_UPDATERES; } else if (upRes->info.type == STREAM_INVERT) { pInfo->scanMode = STREAM_SCAN_FROM_RES; @@ -929,10 +892,9 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { } } -SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataReader, SReadHandle* pHandle, - uint64_t uid, SSDataBlock* pResBlock, SArray* pColList, - SArray* pTableIdList, SExecTaskInfo* pTaskInfo, SNode* pCondition, - SOperatorInfo* pOperatorDumy) { +SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle, + SArray* pTableIdList, STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo, + STimeWindowAggSupp* pTwSup, int16_t tsColId) { SStreamBlockScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamBlockScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -940,22 +902,28 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataR goto _error; } - STableScanInfo* pSTInfo = (STableScanInfo*)pOperatorDumy->info; + SScanPhysiNode* pScanPhyNode = &pTableScanNode->scan; + + SDataBlockDescNode* pDescNode = pScanPhyNode->node.pOutputDataBlockDesc; + SOperatorInfo* pTableScanDummy = createTableScanOperatorInfo(pTableScanNode, pDataReader, pHandle, pTaskInfo); - int32_t numOfOutput = taosArrayGetSize(pColList); + STableScanInfo* pSTInfo = (STableScanInfo*)pTableScanDummy->info; - SArray* pColIds = taosArrayInit(4, sizeof(int16_t)); + int32_t numOfCols = 0; + pInfo->pColMatchInfo = extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID); + + int32_t numOfOutput = taosArrayGetSize(pInfo->pColMatchInfo); + SArray* pColIds = taosArrayInit(numOfOutput, sizeof(int16_t)); for (int32_t i = 0; i < numOfOutput; ++i) { - SColMatchInfo* id = taosArrayGet(pColList, i); + SColMatchInfo* id = taosArrayGet(pInfo->pColMatchInfo, i); + int16_t colId = id->colId; taosArrayPush(pColIds, &colId); } - pInfo->pColMatchInfo = pColList; - // set the extract column id to streamHandle - 
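Editor's note: createStreamScanOperatorInfo above now derives the reader's column-id list from its own col-match info (built via extractColMatchInfo) instead of accepting a caller-supplied array. The extraction itself is a one-pass copy, sketched here with plain arrays in place of SArray:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct { int16_t colId; bool output; } ColMatch;

int16_t *buildColIds(const ColMatch *match, int32_t n) {
  int16_t *ids = malloc(sizeof(int16_t) * (size_t)n);
  if (ids == NULL) {
    return NULL;
  }
  for (int32_t i = 0; i < n; ++i) {
    ids[i] = match[i].colId;   // one id per matched column, order preserved
  }
  return ids;                  // handed to tqReadHandleSetColIdList
}
```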
tqReadHandleSetColIdList((STqReadHandle*)streamReadHandle, pColIds); - int32_t code = tqReadHandleSetTbUidList(streamReadHandle, pTableIdList); + tqReadHandleSetColIdList((STqReadHandle*)pHandle->reader, pColIds); + int32_t code = tqReadHandleSetTbUidList(pHandle->reader, pTableIdList); if (code != 0) { goto _error; } @@ -971,37 +939,39 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataR goto _error; } - pInfo->primaryTsIndex = 0; // TODO(liuyao) get it from physical plan + pInfo->primaryTsIndex = tsColId; if (pSTInfo->interval.interval > 0) { - pInfo->pUpdateInfo = updateInfoInitP(&pSTInfo->interval, 10000); // TODO(liuyao) get watermark from physical plan + pInfo->pUpdateInfo = updateInfoInitP(&pSTInfo->interval, pTwSup->waterMark); } else { pInfo->pUpdateInfo = NULL; } - pInfo->readHandle = *pHandle; - pInfo->tableUid = uid; - pInfo->streamBlockReader = streamReadHandle; - pInfo->pRes = pResBlock; - pInfo->pCondition = pCondition; - pInfo->pDataReader = pDataReader; - pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; - pInfo->pOperatorDumy = pOperatorDumy; - pInfo->interval = pSTInfo->interval; - - size_t childKeyBufSize = sizeof(int64_t) + sizeof(int64_t) + sizeof(TSKEY); - initCatchSupporter(&pInfo->childAggSup, 1024, childKeyBufSize, - "StreamFinalInterval", "/tmp/"); // TODO(liuyao) get row size from phy plan - - pOperator->name = "StreamBlockScanOperator"; + // create the pseduo columns info + if (pTableScanNode->scan.pScanPseudoCols != NULL) { + pInfo->pPseudoExpr = createExprInfo(pTableScanNode->scan.pScanPseudoCols, NULL, &pInfo->numOfPseudoExpr); + } + + pInfo->readHandle = *pHandle; + pInfo->tableUid = pScanPhyNode->uid; + pInfo->streamBlockReader = pHandle->reader; + pInfo->pRes = createResDataBlock(pDescNode); + pInfo->pCondition = pScanPhyNode->node.pConditions; + pInfo->pDataReader = pDataReader; + pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; + pInfo->pOperatorDumy = pTableScanDummy; + pInfo->interval = pSTInfo->interval; + pInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1}; + + pOperator->name = "StreamBlockScanOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN; - pOperator->blocking = false; - pOperator->status = OP_NOT_OPENED; - pOperator->info = pInfo; - pOperator->numOfExprs = pResBlock->info.numOfCols; - pOperator->pTaskInfo = pTaskInfo; + pOperator->blocking = false; + pOperator->status = OP_NOT_OPENED; + pOperator->info = pInfo; + pOperator->numOfExprs = pInfo->pRes->info.numOfCols; + pOperator->pTaskInfo = pTaskInfo; - pOperator->fpSet = - createOperatorFpSet(operatorDummyOpenFn, doStreamBlockScan, NULL, NULL, operatorDummyCloseFn, NULL, NULL, NULL); + pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamBlockScan, NULL, + NULL, operatorDummyCloseFn, NULL, NULL, NULL); return pOperator; @@ -1017,8 +987,9 @@ static void destroySysScanOperator(void* param, int32_t numOfOutput) { blockDataDestroy(pInfo->pRes); const char* name = tNameGetTableName(&pInfo->name); - if (strncasecmp(name, TSDB_INS_TABLE_USER_TABLES, TSDB_TABLE_FNAME_LEN) == 0) { + if (strncasecmp(name, TSDB_INS_TABLE_USER_TABLES, TSDB_TABLE_FNAME_LEN) == 0 || pInfo->pCur != NULL) { metaCloseTbCursor(pInfo->pCur); + pInfo->pCur = NULL; } taosArrayDestroy(pInfo->scanCols); @@ -1189,18 +1160,18 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { // retrieve local table list info from vnode const char* name = tNameGetTableName(&pInfo->name); if (strncasecmp(name, TSDB_INS_TABLE_USER_TABLES, 
TSDB_TABLE_FNAME_LEN) == 0) { + if (pOperator->status == OP_EXEC_DONE) { + return NULL; + } + // the retrieve is executed on the mnode, so return tables that belongs to the information schema database. if (pInfo->readHandle.mnd != NULL) { - if (pOperator->status == OP_EXEC_DONE) { - return NULL; - } - buildSysDbTableInfo(pInfo, pOperator->resultInfo.capacity); doFilterResult(pInfo); pInfo->loadInfo.totalRows += pInfo->pRes->info.rows; - pOperator->status = OP_EXEC_DONE; + doSetOperatorCompleted(pOperator); return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes; } else { if (pInfo->pCur == NULL) { @@ -1226,7 +1197,9 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { blockDataEnsureCapacity(p, pOperator->resultInfo.capacity); char n[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; - while (metaTbCursorNext(pInfo->pCur) == 0) { + + int32_t ret = 0; + while ((ret = metaTbCursorNext(pInfo->pCur)) == 0) { STR_TO_VARSTR(n, pInfo->pCur->mr.me.name); // table name @@ -1260,7 +1233,7 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { // number of columns pColInfoData = taosArrayGet(p->pDataBlock, 3); - colDataAppend(pColInfoData, numOfRows, (char*)&mr.me.stbEntry.schema.nCols, false); + colDataAppend(pColInfoData, numOfRows, (char*)&mr.me.stbEntry.schemaRow.nCols, false); // super table name STR_TO_VARSTR(str, mr.me.name); @@ -1284,7 +1257,7 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { // number of columns pColInfoData = taosArrayGet(p->pDataBlock, 3); - colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.schema.nCols, false); + colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.schemaRow.nCols, false); // super table name pColInfoData = taosArrayGet(p->pDataBlock, 4); @@ -1309,6 +1282,13 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { } } + // todo temporarily free the cursor here, the true reason why the free is not valid needs to be found + if (ret != 0) { + metaCloseTbCursor(pInfo->pCur); + pInfo->pCur = NULL; + doSetOperatorCompleted(pOperator); + } + p->info.rows = numOfRows; pInfo->pRes->info.rows = numOfRows; @@ -1580,20 +1560,19 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { SExprInfo* pExprInfo = &pOperator->pExpr[0]; SSDataBlock* pRes = pInfo->pRes; - if (taosArrayGetSize(pInfo->pTableGroups->pGroupList) == 0) { + int32_t size = taosArrayGetSize(pInfo->pTableList->pTableList); + if (size == 0) { setTaskStatus(pTaskInfo, TASK_COMPLETED); return NULL; } - SArray* pa = taosArrayGetP(pInfo->pTableGroups->pGroupList, 0); - char str[512] = {0}; int32_t count = 0; SMetaReader mr = {0}; metaReaderInit(&mr, pInfo->readHandle.meta, 0); - while (pInfo->curPos < pInfo->pTableGroups->numOfTables && count < pOperator->resultInfo.capacity) { - STableKeyInfo* item = taosArrayGet(pa, pInfo->curPos); + while (pInfo->curPos < size && count < pOperator->resultInfo.capacity) { + STableKeyInfo* item = taosArrayGet(pInfo->pTableList->pTableList, pInfo->curPos); metaGetTableEntryByUid(&mr, item->uid); for (int32_t j = 0; j < pOperator->numOfExprs; ++j) { @@ -1603,19 +1582,21 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { if (fmIsScanPseudoColumnFunc(pExprInfo[j].pExpr->_function.functionId)) { STR_TO_VARSTR(str, mr.me.name); colDataAppend(pDst, count, str, false); - } else { // it is a tag value - if(pDst->info.type == TSDB_DATA_TYPE_JSON){ - const uint8_t *tmp = mr.me.ctbEntry.pTags; - char *data = taosMemoryCalloc(kvRowLen(tmp) + 1, 1); - if(data == NULL){ - 
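Editor's note: the sys-table scan above keeps the last metaTbCursorNext return code so the cursor can be closed and nulled as soon as iteration ends (the in-code todo admits this eager close is a stopgap). The pattern reduced to its essentials, ignoring the batch-capacity break of the real loop:

```c
#include <stdint.h>

typedef struct Cursor Cursor;

int32_t cursorNext(Cursor *c);   /* stand-in: returns 0 while rows remain */
void    cursorClose(Cursor *c);  /* stand-in for metaCloseTbCursor */

void drainCursor(Cursor **pc, void (*onRow)(Cursor *), void (*onDone)(void)) {
  int32_t ret = 0;
  while ((ret = cursorNext(*pc)) == 0) {
    onRow(*pc);                  // consume one table entry
  }
  if (ret != 0) {                // exhausted: free eagerly, null to avoid double close
    cursorClose(*pc);
    *pc = NULL;
    onDone();                    // stand-in for doSetOperatorCompleted
  }
}
```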
qError("doTagScan calloc error:%d", kvRowLen(tmp) + 1); - return NULL; + } else { // it is a tag value + if (pDst->info.type == TSDB_DATA_TYPE_JSON) { + const uint8_t* tmp = mr.me.ctbEntry.pTags; + // TODO opt perf by realloc memory + char* data = taosMemoryCalloc(kvRowLen(tmp) + 1, 1); + if (data == NULL) { + qError("%s failed to malloc memory, size:%d", GET_TASKID(pTaskInfo), kvRowLen(tmp) + 1); + longjmp(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY); } + *data = TSDB_DATA_TYPE_JSON; - memcpy(data+1, tmp, kvRowLen(tmp)); + memcpy(data + 1, tmp, kvRowLen(tmp)); colDataAppend(pDst, count, data, false); taosMemoryFree(data); - }else{ + } else { const char* p = metaGetTableTagVal(&mr.me, pExprInfo[j].base.pParam[0].pCol->colId); colDataAppend(pDst, count, p, (p == NULL)); } @@ -1623,8 +1604,8 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { } count += 1; - if (++pInfo->curPos >= pInfo->pTableGroups->numOfTables) { - pOperator->status = OP_EXEC_DONE; + if (++pInfo->curPos >= size) { + doSetOperatorCompleted(pOperator); } } @@ -1636,6 +1617,8 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { } pRes->info.rows = count; + pOperator->resultInfo.totalRows += count; + return (pRes->info.rows == 0) ? NULL : pInfo->pRes; } @@ -1646,14 +1629,14 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) { SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, SExprInfo* pExpr, int32_t numOfOutput, SSDataBlock* pResBlock, SArray* pColMatchInfo, - STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo) { + STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo) { STagScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STagScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { goto _error; } - pInfo->pTableGroups = pTableGroupInfo; + pInfo->pTableList = pTableListInfo; pInfo->pColMatchInfo = pColMatchInfo; pInfo->pRes = pResBlock; pInfo->readHandle = *pReadHandle; diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index 990dc0f20002134eebc0cbe15a0fc4d0e34e6dc8..8f5fa88070fde1625385fd6e691ccccf424c2094 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -2,6 +2,9 @@ #include "executorimpl.h" static SSDataBlock* doSort(SOperatorInfo* pOperator); +static int32_t doOpenSortOperator(SOperatorInfo* pOperator); +static int32_t getExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len); + static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput); SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSDataBlock* pResBlock, SArray* pSortInfo, SExprInfo* pExprInfo, int32_t numOfCols, @@ -35,7 +38,7 @@ SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSDataBlock* pR pOperator->pTaskInfo = pTaskInfo; pOperator->fpSet = - createOperatorFpSet(operatorDummyOpenFn, doSort, NULL, NULL, destroyOrderOperatorInfo, NULL, NULL, NULL); + createOperatorFpSet(doOpenSortOperator, doSort, NULL, NULL, destroyOrderOperatorInfo, NULL, NULL, getExplainExecInfo); int32_t code = appendDownstream(pOperator, &downstream, 1); return pOperator; @@ -121,20 +124,17 @@ void applyScalarFunction(SSDataBlock* pBlock, void* param) { } } -SSDataBlock* doSort(SOperatorInfo* pOperator) { - if (pOperator->status == OP_EXEC_DONE) { - return NULL; - } - - SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; +int32_t doOpenSortOperator(SOperatorInfo* pOperator) { SSortOperatorInfo* pInfo = 
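Editor's note: doTagScan above upgrades its JSON-tag allocation failure from a logged NULL return to a longjmp into the task environment, so the out-of-memory code propagates to whichever frame set up the jump point. The mechanism in miniature; the error value below is illustrative only, not the real TSDB_CODE_OUT_OF_MEMORY:

```c
#include <setjmp.h>
#include <stdlib.h>

enum { CODE_OUT_OF_MEMORY = 0x0216 };  // illustrative value only

typedef struct { jmp_buf env; } TaskEnv;

void *allocOrAbort(TaskEnv *task, size_t size) {
  void *p = calloc(1, size);
  if (p == NULL) {
    longjmp(task->env, CODE_OUT_OF_MEMORY);  // unwinds to the task's setjmp
  }
  return p;
}
```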
pOperator->info; + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - if (pOperator->status == OP_RES_TO_RETURN) { - return getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity, pInfo->pColMatchInfo); + if (OPTR_IS_OPENED(pOperator)) { + return TSDB_CODE_SUCCESS; } -// pInfo->binfo.pRes is not equalled to the input datablock. -// int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize; + pInfo->startTs = taosGetTimestampUs(); + + // pInfo->binfo.pRes is not equalled to the input datablock. pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, pInfo->pColMatchInfo, SORT_SINGLESOURCE_SORT, -1, -1, NULL, pTaskInfo->id.str); @@ -146,12 +146,39 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) { int32_t code = tsortOpen(pInfo->pSortHandle); taosMemoryFreeClear(ps); + if (code != TSDB_CODE_SUCCESS) { longjmp(pTaskInfo->env, terrno); } + pOperator->cost.openCost = (taosGetTimestampUs() - pInfo->startTs)/1000.0; pOperator->status = OP_RES_TO_RETURN; - return getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity, pInfo->pColMatchInfo); + + OPTR_SET_OPENED(pOperator); + return TSDB_CODE_SUCCESS; +} + +SSDataBlock* doSort(SOperatorInfo* pOperator) { + if (pOperator->status == OP_EXEC_DONE) { + return NULL; + } + + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SSortOperatorInfo* pInfo = pOperator->info; + + int32_t code = pOperator->fpSet._openFn(pOperator); + if (code != TSDB_CODE_SUCCESS) { + longjmp(pTaskInfo->env, code); + } + + SSDataBlock* pBlock = getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity, pInfo->pColMatchInfo); + + if (pBlock != NULL) { + pOperator->resultInfo.totalRows += pBlock->info.rows; + } else { + doSetOperatorCompleted(pOperator); + } + return pBlock; } void destroyOrderOperatorInfo(void* param, int32_t numOfOutput) { @@ -161,3 +188,15 @@ void destroyOrderOperatorInfo(void* param, int32_t numOfOutput) { taosArrayDestroy(pInfo->pSortInfo); taosArrayDestroy(pInfo->pColMatchInfo); } + +int32_t getExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len) { + ASSERT(pOptr != NULL); + SSortExecInfo* pInfo = taosMemoryCalloc(1, sizeof(SSortExecInfo)); + + SSortOperatorInfo *pOperatorInfo = (SSortOperatorInfo*)pOptr->info; + + *pInfo = tsortGetSortExecInfo(pOperatorInfo->pSortHandle); + *pOptrExplain = pInfo; + *len = sizeof(SSortExecInfo); + return TSDB_CODE_SUCCESS; +} diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 588c3e90e7757feb1ca7dcd1950c14f84b38c577..97ee124a6dd26e8f9a8c077f3be9185ac4d8af78 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1,3 +1,4 @@ +#include "function.h" #include "executorimpl.h" #include "functionMgt.h" #include "tdatablock.h" @@ -9,6 +10,12 @@ typedef enum SResultTsInterpType { } SResultTsInterpType; static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator); +static SSDataBlock* doStreamSessionWindowAgg(SOperatorInfo* pOperator); + +static int64_t* extractTsCol(SSDataBlock* pBlock, const SIntervalAggOperatorInfo* pInfo); + +static SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult); +static void doCloseWindow(SResultRowInfo* pResultRowInfo, const SIntervalAggOperatorInfo* pInfo, SResultRow* pResult); /* * There are two cases to handle: @@ -20,47 +27,11 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator); * 
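Editor's note: the sort operator above is split into doOpenSortOperator and doSort: the open callback builds the sort handle, runs tsortOpen, and records openCost, made idempotent by the OPTR_IS_OPENED/OPTR_SET_OPENED guard, while doSort merely drains sorted blocks. The guard pattern on its own:

```c
#include <stdbool.h>
#include <stdint.h>

typedef struct Op {
  bool   opened;
  double openCost;                 // milliseconds spent in open, for explain
} Op;

int32_t openOnce(Op *op, int32_t (*doOpen)(Op *)) {
  if (op->opened) {
    return 0;                      // idempotent: later calls are free
  }
  int32_t code = doOpen(op);       // the expensive one-time work
  if (code == 0) {
    op->opened = true;
  }
  return code;
}
```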
is a previous result generated or not. */ static void setIntervalQueryRange(STableQueryInfo* pTableQueryInfo, TSKEY key, STimeWindow* pQRange) { - // SResultRowInfo* pResultRowInfo = &pTableQueryInfo->resInfo; - // if (pResultRowInfo->curPos != -1) { - // return; - // } - - // pTableQueryInfo->win.skey = key; - // STimeWindow win = {.skey = key, .ekey = pQRange->ekey}; - - /** - * In handling the both ascending and descending order super table query, we need to find the first qualified - * timestamp of this table, and then set the first qualified start timestamp. - * In ascending query, the key is the first qualified timestamp. However, in the descending order query, additional - * operations involve. - */ - // STimeWindow w = TSWINDOW_INITIALIZER; - // - // TSKEY sk = TMIN(win.skey, win.ekey); - // TSKEY ek = TMAX(win.skey, win.ekey); - // getAlignQueryTimeWindow(pQueryAttr, win.skey, sk, ek, &w); - - // if (pResultRowInfo->prevSKey == TSKEY_INITIAL_VAL) { - // if (!QUERY_IS_ASC_QUERY(pQueryAttr)) { - // assert(win.ekey == pQueryAttr->window.ekey); - // } - // - // pResultRowInfo->prevSKey = w.skey; - // } - - // pTableQueryInfo->lastKey = pTableQueryInfo->win.skey; + // do nothing } -static TSKEY getStartTsKey(STimeWindow* win, const TSKEY* tsCols, int32_t rows, bool ascQuery) { - TSKEY ts = TSKEY_INITIAL_VAL; - if (tsCols == NULL) { - ts = ascQuery ? win->skey : win->ekey; - } else { -// int32_t offset = ascQuery ? 0 : rows - 1; - ts = tsCols[0]; - } - - return ts; +static TSKEY getStartTsKey(STimeWindow* win, const TSKEY* tsCols) { + return tsCols == NULL? win->skey:tsCols[0]; } static void getInitialStartTimeWindow(SInterval* pInterval, int32_t precision, TSKEY ts, STimeWindow* w, @@ -133,8 +104,10 @@ static int32_t setTimeWindowOutputBuf(SResultRowInfo* pResultRowInfo, STimeWindo // set time window for current result pResultRow->win = (*win); + *pResult = pResultRow; setResultRowInitCtx(pResultRow, pCtx, numOfOutput, rowCellInfoOffset); + return TSDB_CODE_SUCCESS; } @@ -162,38 +135,38 @@ static void doKeepNewWindowStartInfo(SWindowRowsSup* pRowSup, const int64_t* tsL static FORCE_INLINE int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t searchFn, TSKEY ekey, int16_t pos, int16_t order, int64_t* pData) { - int32_t forwardStep = 0; + int32_t forwardRows = 0; if (order == TSDB_ORDER_ASC) { int32_t end = searchFn((char*)&pData[pos], numOfRows - pos, ekey, order); if (end >= 0) { - forwardStep = end; + forwardRows = end; if (pData[end + pos] == ekey) { - forwardStep += 1; + forwardRows += 1; } } } else { int32_t end = searchFn((char*)&pData[pos], numOfRows - pos, ekey, order); if (end >= 0) { - forwardStep = end; + forwardRows = end; if (pData[end + pos] == ekey) { - forwardStep += 1; + forwardRows += 1; } } // int32_t end = searchFn((char*)pData, pos + 1, ekey, order); // if (end >= 0) { -// forwardStep = pos - end; +// forwardRows = pos - end; // // if (pData[end] == ekey) { -// forwardStep += 1; +// forwardRows += 1; // } // } } - assert(forwardStep >= 0); - return forwardStep; + assert(forwardRows >= 0); + return forwardRows; } int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order) { @@ -338,34 +311,40 @@ static void getNextTimeWindow(SInterval* pInterval, int32_t precision, int32_t o tw->ekey -= 1; } -void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, SArray* pDataBlock, TSKEY prevTs, +void doTimeWindowInterpolation(SIntervalAggOperatorInfo *pInfo, int32_t numOfExprs, SArray* pDataBlock, TSKEY prevTs, int32_t 
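Editor's note: the forwardStep-to-forwardRows rename above keeps the computation intact: starting at pos in a block sorted on the timestamp column, count the rows that fall inside the window up to ekey, with the boundary itself inclusive. Mirrored below, with search() standing in for the binary-search helper and its return convention taken from the surrounding code:

```c
#include <stdint.h>

/* stand-in for binarySearchForKey over ts[0..num-1] */
int32_t search(const int64_t *ts, int32_t num, int64_t key);

int32_t forwardRows(const int64_t *ts, int32_t numOfRows, int32_t pos, int64_t ekey) {
  int32_t end = search(&ts[pos], numOfRows - pos, ekey);
  if (end < 0) {
    return 0;                      // no qualified row from pos onward
  }
  int32_t rows = end;
  if (ts[pos + end] == ekey) {
    rows += 1;                     // the window boundary is inclusive
  }
  return rows;
}
```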
prevRowIndex, TSKEY curTs, int32_t curRowIndex, TSKEY windowKey, int32_t type) { - SExprInfo* pExpr = pOperator->pExpr; + SqlFunctionCtx* pCtx = pInfo->binfo.pCtx; - SqlFunctionCtx* pCtx = pInfo->pCtx; + int32_t index = 1; + for (int32_t k = 0; k < numOfExprs; ++k) { - for (int32_t k = 0; k < pOperator->numOfExprs; ++k) { - int32_t functionId = pCtx[k].functionId; - if (functionId != FUNCTION_TWA && functionId != FUNCTION_INTERP) { + // todo use flag instead of function name + if (strcmp(pCtx[k].pExpr->pExpr->_function.functionName, "twa") != 0) { pCtx[k].start.key = INT64_MIN; continue; } - SColIndex* pColIndex = NULL /*&pExpr[k].base.colInfo*/; - int16_t index = pColIndex->colIndex; - SColumnInfoData* pColInfo = taosArrayGet(pDataBlock, index); +// if (functionId != FUNCTION_TWA && functionId != FUNCTION_INTERP) { +// pCtx[k].start.key = INT64_MIN; +// continue; +// } - // assert(pColInfo->info.colId == pColIndex->info.colId && curTs != windowKey); - double v1 = 0, v2 = 0, v = 0; + SFunctParam* pParam = &pCtx[k].param[0]; + SColumnInfoData* pColInfo = taosArrayGet(pDataBlock, pParam->pCol->slotId); + ASSERT(pColInfo->info.colId == pParam->pCol->colId && curTs != windowKey); + + double v1 = 0, v2 = 0, v = 0; if (prevRowIndex == -1) { - // GET_TYPED_DATA(v1, double, pColInfo->info.type, (char*)pRuntimeEnv->prevRow[index]); + SGroupKeys* p = taosArrayGet(pInfo->pPrevValues, index); + GET_TYPED_DATA(v1, double, pColInfo->info.type, p->pData); } else { - GET_TYPED_DATA(v1, double, pColInfo->info.type, (char*)pColInfo->pData + prevRowIndex * pColInfo->info.bytes); + GET_TYPED_DATA(v1, double, pColInfo->info.type, colDataGetData(pColInfo, prevRowIndex)); } - GET_TYPED_DATA(v2, double, pColInfo->info.type, (char*)pColInfo->pData + curRowIndex * pColInfo->info.bytes); + GET_TYPED_DATA(v2, double, pColInfo->info.type, colDataGetData(pColInfo, curRowIndex)); +#if 0 if (functionId == FUNCTION_INTERP) { if (type == RESULT_ROW_START_INTERP) { pCtx[k].start.key = prevTs; @@ -385,6 +364,8 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, } } } else if (functionId == FUNCTION_TWA) { +#endif + SPoint point1 = (SPoint){.key = prevTs, .val = &v1}; SPoint point2 = (SPoint){.key = curTs, .val = &v2}; SPoint point = (SPoint){.key = windowKey, .val = &v}; @@ -398,8 +379,13 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, pCtx[k].end.key = point.key; pCtx[k].end.val = v; } + + index += 1; } +#if 0 } +#endif + } static void setNotInterpoWindowKey(SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t type) { @@ -414,62 +400,59 @@ static void setNotInterpoWindowKey(SqlFunctionCtx* pCtx, int32_t numOfOutput, in } } -static bool setTimeWindowInterpolationStartTs(SOperatorInfo* pOperatorInfo, SqlFunctionCtx* pCtx, int32_t pos, - int32_t numOfRows, SArray* pDataBlock, const TSKEY* tsCols, - STimeWindow* win) { - bool ascQuery = true; +static bool setTimeWindowInterpolationStartTs(SIntervalAggOperatorInfo *pInfo, SqlFunctionCtx* pCtx, int32_t numOfExprs, int32_t pos, + SSDataBlock* pBlock, const TSKEY* tsCols, STimeWindow* win) { + bool ascQuery = (pInfo->order == TSDB_ORDER_ASC); + TSKEY curTs = tsCols[pos]; - TSKEY lastTs = 0; //*(TSKEY*)pRuntimeEnv->prevRow[0]; + + SGroupKeys* pTsKey = taosArrayGet(pInfo->pPrevValues, 0); + TSKEY lastTs = *(int64_t*) pTsKey->pData; // lastTs == INT64_MIN and pos == 0 means this is the first time window, interpolation is not needed. // start exactly from this point, no need to do interpolation TSKEY key = ascQuery ? 
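Editor's note: the twa branch above collects two bracketing points (prevTs/v1 and curTs/v2) and asks for the value at the window key; the library call that performs the arithmetic falls outside this hunk. The underlying formula is plain linear interpolation:

```c
#include <stdint.h>

typedef struct { int64_t key; double val; } Point;

double interpAt(Point p1, Point p2, int64_t windowKey) {
  if (p2.key == p1.key) {
    return p1.val;                 // degenerate segment: avoid divide by zero
  }
  double slope = (p2.val - p1.val) / (double)(p2.key - p1.key);
  return p1.val + slope * (double)(windowKey - p1.key);
}
```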
win->skey : win->ekey; if (key == curTs) { - setNotInterpoWindowKey(pCtx, pOperatorInfo->numOfExprs, RESULT_ROW_START_INTERP); + setNotInterpoWindowKey(pCtx, numOfExprs, RESULT_ROW_START_INTERP); return true; } - if (lastTs == INT64_MIN && ((pos == 0 && ascQuery) || (pos == (numOfRows - 1) && !ascQuery))) { - setNotInterpoWindowKey(pCtx, pOperatorInfo->numOfExprs, RESULT_ROW_START_INTERP); - return true; + // it is the first time window, no need to do interpolation + if (pTsKey->isNull && pos == 0) { + setNotInterpoWindowKey(pCtx, numOfExprs, RESULT_ROW_START_INTERP); + } else { + TSKEY prevTs = ((pos == 0) ? lastTs : tsCols[pos - 1]); + doTimeWindowInterpolation(pInfo, numOfExprs, pBlock->pDataBlock, prevTs, pos - 1, curTs, pos, key, + RESULT_ROW_START_INTERP); } - int32_t step = 1; // GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order); - TSKEY prevTs = ((pos == 0 && ascQuery) || (pos == (numOfRows - 1) && !ascQuery)) ? lastTs : tsCols[pos - step]; - - doTimeWindowInterpolation(pOperatorInfo, pOperatorInfo->info, pDataBlock, prevTs, pos - step, curTs, pos, key, - RESULT_ROW_START_INTERP); return true; } -static bool setTimeWindowInterpolationEndTs(SOperatorInfo* pOperatorInfo, SqlFunctionCtx* pCtx, int32_t endRowIndex, - SArray* pDataBlock, const TSKEY* tsCols, TSKEY blockEkey, - STimeWindow* win) { - int32_t order = TSDB_ORDER_ASC; - int32_t numOfOutput = pOperatorInfo->numOfExprs; +static bool setTimeWindowInterpolationEndTs(SIntervalAggOperatorInfo *pInfo, SqlFunctionCtx* pCtx, int32_t numOfExprs, int32_t endRowIndex, + SArray* pDataBlock, const TSKEY* tsCols, TSKEY blockEkey, STimeWindow* win) { + int32_t order = pInfo->order; TSKEY actualEndKey = tsCols[endRowIndex]; - TSKEY key = order ? win->ekey : win->skey; + TSKEY key = (order == TSDB_ORDER_ASC) ? 
win->ekey : win->skey; // not ended in current data block, do not invoke interpolation - if ((key > blockEkey /*&& QUERY_IS_ASC_QUERY(pQueryAttr)*/) || - (key < blockEkey /*&& !QUERY_IS_ASC_QUERY(pQueryAttr)*/)) { - setNotInterpoWindowKey(pCtx, numOfOutput, RESULT_ROW_END_INTERP); + if ((key > blockEkey && (order == TSDB_ORDER_ASC)) || (key < blockEkey && (order == TSDB_ORDER_DESC))) { + setNotInterpoWindowKey(pCtx, numOfExprs, RESULT_ROW_END_INTERP); return false; } - // there is actual end point of current time window, no interpolation need + // there is actual end point of current time window, no interpolation needs if (key == actualEndKey) { - setNotInterpoWindowKey(pCtx, numOfOutput, RESULT_ROW_END_INTERP); + setNotInterpoWindowKey(pCtx, numOfExprs, RESULT_ROW_END_INTERP); return true; } - int32_t step = GET_FORWARD_DIRECTION_FACTOR(order); - int32_t nextRowIndex = endRowIndex + step; + int32_t nextRowIndex = endRowIndex + 1; assert(nextRowIndex >= 0); TSKEY nextKey = tsCols[nextRowIndex]; - doTimeWindowInterpolation(pOperatorInfo, pOperatorInfo->info, pDataBlock, actualEndKey, endRowIndex, nextKey, + doTimeWindowInterpolation(pInfo, numOfExprs, pDataBlock, actualEndKey, endRowIndex, nextKey, nextRowIndex, key, RESULT_ROW_END_INTERP); return true; } @@ -541,8 +524,8 @@ static int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext, return startPos; } -static bool resultRowInterpolated(SResultRow* pResult, SResultTsInterpType type) { - assert(pResult != NULL && (type == RESULT_ROW_START_INTERP || type == RESULT_ROW_END_INTERP)); +static bool isResultRowInterpolated(SResultRow* pResult, SResultTsInterpType type) { + ASSERT(pResult != NULL && (type == RESULT_ROW_START_INTERP || type == RESULT_ROW_END_INTERP)); if (type == RESULT_ROW_START_INTERP) { return pResult->startInterp == true; } else { @@ -559,34 +542,29 @@ static void setResultRowInterpo(SResultRow* pResult, SResultTsInterpType type) { } } -static void doWindowBorderInterpolation(SOperatorInfo* pOperatorInfo, SSDataBlock* pBlock, SqlFunctionCtx* pCtx, - SResultRow* pResult, STimeWindow* win, int32_t startPos, int32_t forwardStep, - int32_t order, bool timeWindowInterpo) { - if (!timeWindowInterpo) { +static void doWindowBorderInterpolation(SIntervalAggOperatorInfo *pInfo, SSDataBlock* pBlock, int32_t numOfExprs, SqlFunctionCtx* pCtx, + SResultRow* pResult, STimeWindow* win, int32_t startPos, int32_t forwardRows) { + if (!pInfo->timeWindowInterpo) { return; } - assert(pBlock != NULL); - int32_t step = GET_FORWARD_DIRECTION_FACTOR(order); - + ASSERT(pBlock != NULL); if (pBlock->pDataBlock == NULL) { // tscError("pBlock->pDataBlock == NULL"); return; } - SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, 0); + SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex); TSKEY* tsCols = (TSKEY*)(pColInfo->pData); - bool done = resultRowInterpolated(pResult, RESULT_ROW_START_INTERP); + bool done = isResultRowInterpolated(pResult, RESULT_ROW_START_INTERP); if (!done) { // it is not interpolated, now start to generated the interpolated value - int32_t startRowIndex = startPos; - bool interp = setTimeWindowInterpolationStartTs(pOperatorInfo, pCtx, startRowIndex, pBlock->info.rows, - pBlock->pDataBlock, tsCols, win); + bool interp = setTimeWindowInterpolationStartTs(pInfo, pCtx, numOfExprs, startPos, pBlock, tsCols, win); if (interp) { setResultRowInterpo(pResult, RESULT_ROW_START_INTERP); } } else { - setNotInterpoWindowKey(pCtx, pOperatorInfo->numOfExprs, RESULT_ROW_START_INTERP); + 
setNotInterpoWindowKey(pCtx, numOfExprs, RESULT_ROW_START_INTERP); } // point interpolation does not require the end key time window interpolation. @@ -595,139 +573,256 @@ static void doWindowBorderInterpolation(SOperatorInfo* pOperatorInfo, SSDataBloc // } // interpolation query does not generate the time window end interpolation - done = resultRowInterpolated(pResult, RESULT_ROW_END_INTERP); + done = isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP); if (!done) { - int32_t endRowIndex = startPos + (forwardStep - 1) * step; + int32_t endRowIndex = startPos + forwardRows - 1; - TSKEY endKey = (order == TSDB_ORDER_ASC) ? pBlock->info.window.ekey : pBlock->info.window.skey; + TSKEY endKey = (pInfo->order == TSDB_ORDER_ASC) ? pBlock->info.window.ekey : pBlock->info.window.skey; bool interp = - setTimeWindowInterpolationEndTs(pOperatorInfo, pCtx, endRowIndex, pBlock->pDataBlock, tsCols, endKey, win); + setTimeWindowInterpolationEndTs(pInfo, pCtx, numOfExprs, endRowIndex, pBlock->pDataBlock, tsCols, endKey, win); if (interp) { setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); } } else { - setNotInterpoWindowKey(pCtx, pOperatorInfo->numOfExprs, RESULT_ROW_END_INTERP); + setNotInterpoWindowKey(pCtx, numOfExprs, RESULT_ROW_END_INTERP); } } -static void saveDataBlockLastRow(char** pRow, SArray* pDataBlock, int32_t rowIndex, int32_t numOfCols) { - if (pDataBlock == NULL) { +static void saveDataBlockLastRow(SArray* pPrevKeys, const SSDataBlock* pBlock, SArray* pCols) { + if (pBlock->pDataBlock == NULL) { return; } - for (int32_t k = 0; k < numOfCols; ++k) { - SColumnInfoData* pColInfo = taosArrayGet(pDataBlock, k); - memcpy(pRow[k], ((char*)pColInfo->pData) + (pColInfo->info.bytes * rowIndex), pColInfo->info.bytes); + size_t num = taosArrayGetSize(pPrevKeys); + for (int32_t k = 0; k < num; ++k) { + SColumn* pc = taosArrayGet(pCols, k); + + SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, pc->slotId); + + SGroupKeys* pkey = taosArrayGet(pPrevKeys, k); + for(int32_t i = pBlock->info.rows - 1; i >= 0; --i) { + if (colDataIsNull_s(pColInfo, i)) { + continue; + } + + char* val = colDataGetData(pColInfo, i); + if (IS_VAR_DATA_TYPE(pkey->type)) { + memcpy(pkey->pData, val, varDataTLen(val)); + ASSERT(varDataTLen(val) <= pkey->bytes); + } else { + memcpy(pkey->pData, val, pkey->bytes); + } + + break; + } } } -static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock, - uint64_t tableGroupId) { +static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t numOfExprs, SResultRowInfo* pResultRowInfo, + SSDataBlock* pBlock, int32_t scanFlag, int64_t* tsCols, SResultRowPosition* p) { + SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo; + SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info; - SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo; - int32_t numOfOutput = pOperatorInfo->numOfExprs; + int32_t startPos = 0; + int32_t numOfOutput = pOperatorInfo->numOfExprs; + uint64_t groupId = pBlock->info.groupId; - SArray* pUpdated = NULL; - if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) { - pUpdated = taosArrayInit(4, POINTER_BYTES); + SResultRow* pResult = NULL; + + while (1) { + SListNode* pn = tdListGetHead(pResultRowInfo->openWindow); + + SResultRowPosition* p1 = (SResultRowPosition*)pn->data; + if (p->pageId == p1->pageId && p->offset == p1->offset) { + break; + } + + SResultRow* pr = getResultRowByPos(pInfo->aggSup.pResultBuf, p1); + ASSERT(pr->offset == p1->offset && pr->pageId == 
p1->pageId); + + if (pr->closed) { + ASSERT(isResultRowInterpolated(pr, RESULT_ROW_START_INTERP) && isResultRowInterpolated(pr, RESULT_ROW_END_INTERP)); + tdListPopHead(pResultRowInfo->openWindow); + continue; + } + + STimeWindow w = pr->win; + int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &w, (scanFlag == MAIN_SCAN), &pResult, groupId, pInfo->binfo.pCtx, + numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->aggSup, pTaskInfo); + if (ret != TSDB_CODE_SUCCESS) { + longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + ASSERT(!isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP)); + + SGroupKeys *pTsKey = taosArrayGet(pInfo->pPrevValues, 0); + int64_t prevTs = *(int64_t*) pTsKey->pData; + doTimeWindowInterpolation(pInfo, numOfOutput, pBlock->pDataBlock, prevTs, -1, tsCols[startPos], startPos, + w.ekey, RESULT_ROW_END_INTERP); + + setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); + setNotInterpoWindowKey(pInfo->binfo.pCtx, numOfExprs, RESULT_ROW_START_INTERP); + + doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &w, &pInfo->twAggSup.timeWindowData, startPos, 0, tsCols, + pBlock->info.rows, numOfExprs, pInfo->order); + + if (isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP)) { + closeResultRow(pr); + tdListPopHead(pResultRowInfo->openWindow); + } else { // the remaining open windows cannot be closed yet. + break; + } } +} - int32_t step = 1; - bool ascScan = (pInfo->order == TSDB_ORDER_ASC); +typedef int64_t (*__get_value_fn_t)(void* data, int32_t index); - // int32_t prevIndex = pResultRowInfo->curPos; +int32_t binarySearch(void* keyList, int num, TSKEY key, int order, + __get_value_fn_t getValuefn) { + int firstPos = 0, lastPos = num - 1, midPos = -1; + int numOfRows = 0; - TSKEY* tsCols = NULL; - if (pBlock->pDataBlock != NULL) { - SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex); - tsCols = (int64_t*)pColDataInfo->pData; + if (num <= 0) return -1; + if (order == TSDB_ORDER_DESC) { + // find the last position whose key is smaller than or equal to the search key + while (1) { + if (key >= getValuefn(keyList, lastPos)) return lastPos; + if (key == getValuefn(keyList, firstPos)) return firstPos; + if (key < getValuefn(keyList, firstPos)) return firstPos - 1; - if (tsCols != NULL) { - blockDataUpdateTsWindow(pBlock, pInfo->primaryTsIndex); + numOfRows = lastPos - firstPos + 1; + midPos = (numOfRows >> 1) + firstPos; + + if (key < getValuefn(keyList, midPos)) { + lastPos = midPos - 1; + } else if (key > getValuefn(keyList, midPos)) { + firstPos = midPos + 1; + } else { + break; + } + } + + } else { + // find the first position whose key is greater than or equal to the search key + while (1) { + if (key <= getValuefn(keyList, firstPos)) return firstPos; + if (key == getValuefn(keyList, lastPos)) return lastPos; + + if (key > getValuefn(keyList, lastPos)) { + lastPos = lastPos + 1; + if (lastPos >= num) + return -1; + else + return lastPos; + } + + numOfRows = lastPos - firstPos + 1; + midPos = (numOfRows >> 1) + firstPos; + + if (key < getValuefn(keyList, midPos)) { + lastPos = midPos - 1; + } else if (key > getValuefn(keyList, midPos)) { + firstPos = midPos + 1; + } else { + break; + } } } - int32_t startPos = 0; - TSKEY ts = getStartTsKey(&pBlock->info.window, tsCols, pBlock->info.rows, ascScan); + return midPos; +} + +int64_t getReskey(void* data, int32_t index) { + SArray* res = (SArray*) data; + SResKeyPos* pos = taosArrayGetP(res, index); + return *(int64_t*)pos->key; +} + +static int32_t saveResult(SResultRow* result, uint64_t groupId, SArray* pUpdated) { + 
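/* Illustrative sketch, not part of the patch: with order == TSDB_ORDER_DESC, binarySearch()
 * above returns the last index whose key is <= the search key (or -1 when every key is
 * larger). A self-contained equivalent over a plain ascending int64_t array, hypothetical
 * names throughout: */
#include <assert.h>
#include <stdint.h>

static int32_t lastLessOrEqual(const int64_t* keys, int32_t num, int64_t key) {
  int32_t lo = 0, hi = num - 1, ans = -1;
  while (lo <= hi) {
    int32_t mid = lo + (hi - lo) / 2;
    if (keys[mid] <= key) { ans = mid; lo = mid + 1; } else { hi = mid - 1; }
  }
  return ans;
}

static void lastLessOrEqualDemo(void) {
  int64_t keys[] = {10, 20, 30};
  assert(lastLessOrEqual(keys, 3, 5)  == -1);  // every key is larger
  assert(lastLessOrEqual(keys, 3, 20) ==  1);  // exact match
  assert(lastLessOrEqual(keys, 3, 25) ==  1);  // falls between 20 and 30
  assert(lastLessOrEqual(keys, 3, 99) ==  2);  // beyond the last key
}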
int32_t size = taosArrayGetSize(pUpdated); + int32_t index = binarySearch(pUpdated, size, result->win.skey, TSDB_ORDER_DESC, getReskey); + if (index == -1) { + index = 0; + } else { + TSKEY resTs = getReskey(pUpdated, index); + if (resTs < result->win.skey) { + index++; + } else { + return TSDB_CODE_SUCCESS; + } + } + + SResKeyPos* newPos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); + if (newPos == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + newPos->groupId = groupId; + newPos->pos = (SResultRowPosition){.pageId = result->pageId, .offset = result->offset}; + *(int64_t*)newPos->key = result->win.skey; + if (taosArrayInsert(pUpdated, index, &newPos) == NULL ){ + return TSDB_CODE_OUT_OF_MEMORY; + } + return TSDB_CODE_SUCCESS; +} + +static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock, + int32_t scanFlag, SArray* pUpdated) { + SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info; + + SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo; + + int32_t startPos = 0; + int32_t numOfOutput = pOperatorInfo->numOfExprs; + int64_t *tsCols = extractTsCol(pBlock, pInfo); + uint64_t tableGroupId = pBlock->info.groupId; + bool ascScan = (pInfo->order == TSDB_ORDER_ASC); + TSKEY ts = getStartTsKey(&pBlock->info.window, tsCols); + SResultRow* pResult = NULL; STimeWindow win = getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, pInfo->interval.precision, &pInfo->win); - bool masterScan = true; - SResultRow* pResult = NULL; - int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, masterScan, &pResult, tableGroupId, pInfo->binfo.pCtx, + int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pInfo->binfo.pCtx, numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } - if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) { - SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); - pos->groupId = tableGroupId; - pos->pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset}; - *(int64_t*)pos->key = pResult->win.skey; - - taosArrayPush(pUpdated, &pos); + if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && + (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE || pInfo->twAggSup.calTrigger == 0)) { + saveResult(pResult, tableGroupId, pUpdated); } - int32_t forwardStep = 0; TSKEY ekey = ascScan? win.ekey:win.skey; - forwardStep = - getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->order); - ASSERT(forwardStep > 0); + int32_t forwardRows = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->order); + ASSERT(forwardRows > 0); // prev time window not interpolation yet. - // int32_t curIndex = pResultRowInfo->curPos; - -#if 0 - if (prevIndex != -1 && prevIndex < curIndex && pInfo->timeWindowInterpo) { - for (int32_t j = prevIndex; j < curIndex; ++j) { // previous time window may be all closed already. 
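/* Illustrative sketch, not part of the patch: saveResult() above keeps pUpdated ordered by
 * window start key and skips windows that are already present, so each updated window is
 * reported downstream once per batch. The same invariant on a plain int64_t array, with
 * hypothetical names: */
#include <stdint.h>
#include <string.h>

typedef struct { int64_t keys[64]; int32_t num; } SSortedKeysSketch;

/* insert `key` keeping ascending order; duplicates are ignored; returns 0 on success */
static int32_t sortedKeysInsert(SSortedKeysSketch* s, int64_t key) {
  int32_t i = 0;
  while (i < s->num && s->keys[i] < key) ++i;
  if (i < s->num && s->keys[i] == key) return 0;  // already recorded, nothing to do
  if (s->num >= 64) return -1;                    // sketch only: fixed capacity
  memmove(&s->keys[i + 1], &s->keys[i], (size_t)(s->num - i) * sizeof(int64_t));
  s->keys[i] = key;
  s->num += 1;
  return 0;
}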
- SResultRow* pRes = getResultRow(pResultRowInfo, j); - if (pRes->closed) { - assert(resultRowInterpolated(pRes, RESULT_ROW_START_INTERP) && resultRowInterpolated(pRes, RESULT_ROW_END_INTERP)); - continue; - } - - STimeWindow w = pRes->win; - ret = setTimeWindowOutputBuf(pResultRowInfo, pBlock->info.uid, &w, masterScan, &pResult, tableGroupId, - pInfo->binfo.pCtx, numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->aggSup, - pTaskInfo); - if (ret != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - assert(!resultRowInterpolated(pResult, RESULT_ROW_END_INTERP)); - doTimeWindowInterpolation(pOperatorInfo, &pInfo->binfo, pBlock->pDataBlock, *(TSKEY*)pInfo->pRow[0], -1, - tsCols[startPos], startPos, w.ekey, RESULT_ROW_END_INTERP); - - setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); - setNotInterpoWindowKey(pInfo->binfo.pCtx, pOperatorInfo->numOfExprs, RESULT_ROW_START_INTERP); - - doApplyFunctions(pInfo->binfo.pCtx, &w, &pInfo->timeWindowData, startPos, 0, tsCols, pBlock->info.rows, numOfOutput, TSDB_ORDER_ASC); - } + if (pInfo->timeWindowInterpo) { + SResultRowPosition pos = addToOpenWindowList(pResultRowInfo, pResult); + doInterpUnclosedTimeWindow(pOperatorInfo, numOfOutput, pResultRowInfo, pBlock, scanFlag, tsCols, &pos); // restore current time window - ret = setTimeWindowOutputBuf(pResultRowInfo, pBlock->info.uid, &win, masterScan, &pResult, tableGroupId, - pInfo->binfo.pCtx, numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->aggSup, - pTaskInfo); + ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pInfo->binfo.pCtx, + numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS) { longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } - } -#endif - // window start key interpolation - doWindowBorderInterpolation(pOperatorInfo, pBlock, pInfo->binfo.pCtx, pResult, &win, startPos, forwardStep, - pInfo->order, false); + // window start key interpolation + doWindowBorderInterpolation(pInfo, pBlock, numOfOutput, pInfo->binfo.pCtx, pResult, &win, startPos, forwardRows); + } updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, true); - doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &win, &pInfo->twAggSup.timeWindowData, startPos, forwardStep, tsCols, - pBlock->info.rows, numOfOutput, TSDB_ORDER_ASC); + doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &win, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, tsCols, + pBlock->info.rows, numOfOutput, pInfo->order); + + doCloseWindow(pResultRowInfo, pInfo, pResult); STimeWindow nextWin = win; while (1) { - int32_t prevEndPos = (forwardStep - 1) * step + startPos; + int32_t prevEndPos = forwardRows - 1 + startPos; startPos = getNextQualifiedWindow(&pInfo->interval, &nextWin, &pBlock->info, tsCols, prevEndPos, pInfo->order); if (startPos < 0) { break; @@ -735,41 +830,71 @@ static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe // null data, failed to allocate more memory buffer int32_t code = - setTimeWindowOutputBuf(pResultRowInfo, &nextWin, masterScan, &pResult, tableGroupId, pInfo->binfo.pCtx, + setTimeWindowOutputBuf(pResultRowInfo, &nextWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pInfo->binfo.pCtx, numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->aggSup, pTaskInfo); if (code != TSDB_CODE_SUCCESS || pResult == NULL) { longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } - if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) { - SResKeyPos* pos = 
taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); - pos->groupId = tableGroupId; - pos->pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset}; - *(int64_t*)pos->key = pResult->win.skey; - - taosArrayPush(pUpdated, &pos); + if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && + (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE || pInfo->twAggSup.calTrigger == 0)) { + saveResult(pResult, tableGroupId, pUpdated); } ekey = ascScan? nextWin.ekey:nextWin.skey; - forwardStep = + forwardRows = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->order); // window start(end) key interpolation - doWindowBorderInterpolation(pOperatorInfo, pBlock, pInfo->binfo.pCtx, pResult, &nextWin, startPos, forwardStep, - pInfo->order, false); + doWindowBorderInterpolation(pInfo, pBlock, numOfOutput, pInfo->binfo.pCtx, pResult, &nextWin, startPos, forwardRows); updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true); - doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &nextWin, &pInfo->twAggSup.timeWindowData, startPos, forwardStep, tsCols, - pBlock->info.rows, numOfOutput, TSDB_ORDER_ASC); + doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &nextWin, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, tsCols, + pBlock->info.rows, numOfOutput, pInfo->order); + doCloseWindow(pResultRowInfo, pInfo, pResult); } if (pInfo->timeWindowInterpo) { - int32_t rowIndex = ascScan ? (pBlock->info.rows - 1) : 0; - saveDataBlockLastRow(pInfo->pRow, pBlock->pDataBlock, rowIndex, pBlock->info.numOfCols); + saveDataBlockLastRow(pInfo->pPrevValues, pBlock, pInfo->pInterpCols); } +} - return pUpdated; - // updateResultRowInfoActiveIndex(pResultRowInfo, &pInfo->win, pRuntimeEnv->current->lastKey, true, false); +void doCloseWindow(SResultRowInfo* pResultRowInfo, const SIntervalAggOperatorInfo* pInfo, SResultRow* pResult) { + // current result is done in computing final results. 
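/* Illustrative sketch, not part of the patch: the main loop of hashIntervalAgg() above walks
 * a block of ascending timestamps window by window: count the rows falling into the active
 * window (forwardRows), apply the aggregate functions, then jump to the next qualified
 * window. A simplified fixed-interval walk, assuming non-negative millisecond timestamps: */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void walkIntervalWindows(const int64_t* ts, int32_t rows, int64_t interval) {
  int32_t pos = 0;
  while (pos < rows) {
    int64_t skey = ts[pos] - (ts[pos] % interval);  // align to the window start
    int64_t ekey = skey + interval - 1;
    int32_t forwardRows = 0;
    while (pos + forwardRows < rows && ts[pos + forwardRows] <= ekey) ++forwardRows;
    printf("window [%" PRId64 ", %" PRId64 "]: %d rows\n", skey, ekey, forwardRows);
    pos += forwardRows;  // the next window starts at the first row beyond ekey
  }
}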
+ if (pInfo->timeWindowInterpo && isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP)) { + closeResultRow(pResult); + tdListPopHead(pResultRowInfo->openWindow); + } +} + +SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult) { + SResultRowPosition pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset}; + SListNode* pn = tdListGetTail(pResultRowInfo->openWindow); + if (pn == NULL) { + tdListAppend(pResultRowInfo->openWindow, &pos); + return pos; + } + + SResultRowPosition* px = (SResultRowPosition*)pn->data; + if (px->pageId != pos.pageId || px->offset != pos.offset) { + tdListAppend(pResultRowInfo->openWindow, &pos); + } + + return pos; +} + +int64_t* extractTsCol(SSDataBlock* pBlock, const SIntervalAggOperatorInfo* pInfo) { + TSKEY* tsCols = NULL; + if (pBlock->pDataBlock != NULL) { + SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex); + tsCols = (int64_t*)pColDataInfo->pData; + + if (tsCols != NULL) { + blockDataUpdateTsWindow(pBlock, pInfo->primaryTsIndex); + } + } + + return tsCols; } static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) { @@ -782,13 +907,11 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) { int32_t scanFlag = MAIN_SCAN; + int64_t st = taosGetTimestampUs(); SOperatorInfo* downstream = pOperator->pDownstream[0]; while (1) { - publishOperatorProfEvent(downstream, QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); - publishOperatorProfEvent(downstream, QUERY_PROF_AFTER_OPERATOR_EXEC); - if (pBlock == NULL) { break; } @@ -800,17 +923,17 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) { STableQueryInfo* pTableQueryInfo = pInfo->pCurrent; setIntervalQueryRange(pTableQueryInfo, pBlock->info.window.skey, &pTaskInfo->window); - hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, pBlock->info.groupId); + hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, scanFlag, NULL); #if 0 // test for encode/decode result info - if(pOperator->encodeResultRow){ + if(pOperator->fpSet.encodeResultRow){ char *result = NULL; int32_t length = 0; SAggSupporter *pSup = &pInfo->aggSup; - pOperator->encodeResultRow(pOperator, pSup, &pInfo->binfo, &result, &length); + pOperator->fpSet.encodeResultRow(pOperator, &result, &length); taosHashClear(pSup->pResultRowHashTable); pInfo->binfo.resultRowInfo.size = 0; - pOperator->decodeResultRow(pOperator, pSup, &pInfo->binfo, result, length); + pOperator->fpSet.decodeResultRow(pOperator, result); if(result){ taosMemoryFree(result); } @@ -821,6 +944,8 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) { closeAllResultRows(&pInfo->binfo.resultRowInfo); initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, pInfo->order); OPTR_SET_OPENED(pOperator); + + pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0; return TSDB_CODE_SUCCESS; } @@ -929,8 +1054,9 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) { } SStateWindowOperatorInfo* pInfo = pOperator->info; - SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - SOptrBasicInfo* pBInfo = &pInfo->binfo; + + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SOptrBasicInfo* pBInfo = &pInfo->binfo; if (pOperator->status == OP_RES_TO_RETURN) { doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); @@ -943,13 +1069,11 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) { } int32_t order = TSDB_ORDER_ASC; + 
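/* Illustrative sketch, not part of the patch: addToOpenWindowList() above appends a
 * result-row position only when it differs from the current tail, so repeated hits on the
 * active window do not grow the open-window list. The same guard on a plain array, with
 * hypothetical names: */
#include <stdint.h>

typedef struct { int32_t pageId; int32_t offset; } SPosSketch;
typedef struct { SPosSketch items[32]; int32_t num; } SOpenListSketch;

static void openListAppend(SOpenListSketch* list, SPosSketch pos) {
  if (list->num > 0) {
    SPosSketch* tail = &list->items[list->num - 1];
    if (tail->pageId == pos.pageId && tail->offset == pos.offset) {
      return;  // the active window is already the tail entry
    }
  }
  if (list->num < 32) list->items[list->num++] = pos;  // sketch only: fixed capacity
}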
int64_t st = taosGetTimestampUs(); SOperatorInfo* downstream = pOperator->pDownstream[0]; while (1) { - publishOperatorProfEvent(downstream, QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); - publishOperatorProfEvent(downstream, QUERY_PROF_AFTER_OPERATOR_EXEC); - if (pBlock == NULL) { break; } @@ -960,6 +1084,8 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) { doStateWindowAggImpl(pOperator, pInfo, pBlock); } + pOperator->cost.openCost = (taosGetTimestampUs() - st)/1000.0; + pOperator->status = OP_RES_TO_RETURN; closeAllResultRows(&pBInfo->resultRowInfo); @@ -970,7 +1096,10 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) { doSetOperatorCompleted(pOperator); } - return pBInfo->pRes->info.rows == 0 ? NULL : pBInfo->pRes; + size_t rows = pBInfo->pRes->info.rows; + pOperator->resultInfo.totalRows += rows; + + return (rows == 0)? NULL : pBInfo->pRes; } static SSDataBlock* doBuildIntervalResult(SOperatorInfo* pOperator) { @@ -998,7 +1127,10 @@ static SSDataBlock* doBuildIntervalResult(SOperatorInfo* pOperator) { doSetOperatorCompleted(pOperator); } - return pBlock->info.rows == 0 ? NULL : pBlock; + size_t rows = pBlock->info.rows; + pOperator->resultInfo.totalRows += rows; + + return (rows == 0)? NULL:pBlock; } } @@ -1033,13 +1165,8 @@ static void setInverFunction(SqlFunctionCtx* pCtx, int32_t num, EStreamType type } } -void doClearWindow(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, char* pData, - int16_t bytes, uint64_t groupId, int32_t numOfOutput) { - SET_RES_WINDOW_KEY(pSup->keyBuf, pData, bytes, groupId); - SResultRowPosition* p1 = - (SResultRowPosition*)taosHashGet(pSup->pResultRowHashTable, pSup->keyBuf, - GET_RES_WINDOW_KEY_LEN(bytes)); - SResultRow* pResult = getResultRowByPos(pSup->pResultBuf, p1); +void doClearWindowImpl(SResultRowPosition* p1, SDiskbasedBuf* pResultBuf, SOptrBasicInfo* pBinfo, int32_t numOfOutput) { + SResultRow* pResult = getResultRowByPos(pResultBuf, p1); SqlFunctionCtx* pCtx = pBinfo->pCtx; for (int32_t i = 0; i < numOfOutput; ++i) { pCtx[i].resultInfo = getResultCell(pResult, i, pBinfo->rowCellInfoOffset); @@ -1054,22 +1181,68 @@ void doClearWindow(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, char* pData, } } +void doClearWindow(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, char* pData, + int16_t bytes, uint64_t groupId, int32_t numOfOutput) { + SET_RES_WINDOW_KEY(pSup->keyBuf, pData, bytes, groupId); + SResultRowPosition* p1 = + (SResultRowPosition*)taosHashGet(pSup->pResultRowHashTable, pSup->keyBuf, + GET_RES_WINDOW_KEY_LEN(bytes)); + doClearWindowImpl(p1, pSup->pResultBuf, pBinfo, numOfOutput); +} + static void doClearWindows(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, - SInterval* pIntrerval, int32_t tsIndex, int32_t numOfOutput, SSDataBlock* pBlock) { + SInterval* pInterval, int32_t tsIndex, int32_t numOfOutput, SSDataBlock* pBlock, + SArray* pUpWins) { SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, tsIndex); TSKEY *tsCols = (TSKEY*)pColDataInfo->pData; int32_t step = 0; for (int32_t i = 0; i < pBlock->info.rows; i += step) { SResultRowInfo dumyInfo; dumyInfo.cur.pageId = -1; - STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[i], pIntrerval, - pIntrerval->precision, NULL); + STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[i], pInterval, + pInterval->precision, NULL); step = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, i, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC); doClearWindow(pSup, pBinfo, (char*)&win.skey, 
sizeof(TKEY), pBlock->info.groupId, numOfOutput); + if (pUpWins) { + taosArrayPush(pUpWins, &win); + } } } + +static int32_t closeIntervalWindow(SHashObj *pHashMap, STimeWindowAggSupp *pSup, + SInterval* pInterval, SArray* closeWins) { + void *pIte = NULL; + size_t keyLen = 0; + while((pIte = taosHashIterate(pHashMap, pIte)) != NULL) { + void* key = taosHashGetKey(pIte, &keyLen); + uint64_t groupId = *(uint64_t*) key; + ASSERT(keyLen == GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))); + TSKEY ts = *(uint64_t*) ((char*)key + sizeof(uint64_t)); + SResultRowInfo dumyInfo; + dumyInfo.cur.pageId = -1; + STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, ts, pInterval, + pInterval->precision, NULL); + if (win.ekey < pSup->maxTs - pSup->waterMark) { + char keyBuf[GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))]; + SET_RES_WINDOW_KEY(keyBuf, &ts, sizeof(TSKEY), groupId); + taosHashRemove(pHashMap, keyBuf, keyLen); + SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); + if (pos == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + pos->groupId = groupId; + pos->pos = *(SResultRowPosition*) pIte; + *(int64_t*)pos->key = ts; + if (!taosArrayPush(closeWins, &pos)) { + taosMemoryFree(pos); + return TSDB_CODE_OUT_OF_MEMORY; + } + } + } + return TSDB_CODE_SUCCESS; +} + static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { SIntervalAggOperatorInfo* pInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -1090,17 +1263,16 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { SOperatorInfo* downstream = pOperator->pDownstream[0]; - SArray* pUpdated = NULL; + SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); + SArray* pClosed = taosArrayInit(4, POINTER_BYTES); + while (1) { - publishOperatorProfEvent(downstream, QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); - publishOperatorProfEvent(downstream, QUERY_PROF_AFTER_OPERATOR_EXEC); - if (pBlock == NULL) { break; } - // The timewindows that overlaps the timestamps of the input pBlock need to be recalculated and return to the + // Each time window that overlaps the timestamps of the input pBlock needs to be recalculated and returned to the + // caller. Note that none of the time windows are closed at this point.
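/* Illustrative sketch, not part of the patch: closeIntervalWindow() above closes a window
 * once its end key falls behind the watermark, i.e. win.ekey < maxTs - waterMark, where
 * maxTs is the largest event timestamp seen so far. Stand-alone predicate plus a worked
 * example: */
#include <stdbool.h>
#include <stdint.h>

static bool isWindowClosable(int64_t winEkey, int64_t maxTs, int64_t waterMark) {
  return winEkey < maxTs - waterMark;
}
/* with maxTs = 605000 and waterMark = 60000 (one minute, in ms), a window ending at 500000
 * is closable, while a window ending at 560000 must stay open for possible late rows */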
// the pDataBlock are always the same one, no need to call this again setInputDataBlock(pOperator, pInfo->binfo.pCtx, pBlock, pInfo->order, MAIN_SCAN, true); @@ -1109,15 +1281,23 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { } if (pBlock->info.type == STREAM_REPROCESS) { - doClearWindows(&pInfo->aggSup, &pInfo->binfo, &pInfo->interval, - pInfo->primaryTsIndex, pOperator->numOfExprs, pBlock); + doClearWindows(&pInfo->aggSup, &pInfo->binfo, &pInfo->interval, 0, + pOperator->numOfExprs, pBlock, NULL); qDebug("%s clear existed time window results for updates checked", GET_TASKID(pTaskInfo)); continue; } - pUpdated = hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, 0); + pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey); + hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, MAIN_SCAN, pUpdated); } + closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, pClosed); + finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pClosed, pInfo->binfo.rowCellInfoOffset); + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER__WINDOW_CLOSE) { + taosArrayAddAll(pUpdated, pClosed); + } + + taosArrayDestroy(pClosed); finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pInfo->binfo.rowCellInfoOffset); initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); @@ -1145,9 +1325,18 @@ void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) { SStreamFinalIntervalOperatorInfo* pInfo = (SStreamFinalIntervalOperatorInfo *)param; doDestroyBasicInfo(&pInfo->binfo, numOfOutput); cleanupAggSup(&pInfo->aggSup); + if (pInfo->pChildren) { + int32_t size = taosArrayGetSize(pInfo->pChildren); + for (int32_t i = 0; i < size; i++) { + SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, i); + destroyIntervalOperatorInfo(pChildOp->info, numOfOutput); + taosMemoryFreeClear(pChildOp->info); + taosMemoryFreeClear(pChildOp); + } + } } -bool allInvertible(SqlFunctionCtx* pFCtx, int32_t numOfCols) { +static bool allInvertible(SqlFunctionCtx* pFCtx, int32_t numOfCols) { for (int32_t i = 0; i < numOfCols; i++) { if (!fmIsInvertible(pFCtx[i].functionId)) { return false; @@ -1156,21 +1345,65 @@ bool allInvertible(SqlFunctionCtx* pFCtx, int32_t numOfCols) { return true; } +static bool timeWindowinterpNeeded(SqlFunctionCtx* pCtx, int32_t numOfCols, SIntervalAggOperatorInfo* pInfo) { + // the primary timestamp column + bool needed = false; + pInfo->pInterpCols = taosArrayInit(4, sizeof(SColumn)); + pInfo->pPrevValues = taosArrayInit(4, sizeof(SGroupKeys)); + + { // ts column + SColumn c = {0}; + c.colId = 1; + c.slotId = pInfo->primaryTsIndex; + c.type = TSDB_DATA_TYPE_TIMESTAMP; + c.bytes = sizeof(int64_t); + taosArrayPush(pInfo->pInterpCols, &c); + + SGroupKeys key = {0}; + key.bytes = c.bytes; + key.type = c.type; + key.isNull = true; // to denote no value is assigned yet + key.pData = taosMemoryCalloc(1, c.bytes); + taosArrayPush(pInfo->pPrevValues, &key); + } + + for(int32_t i = 0; i < numOfCols; ++i) { + SExprInfo* pExpr = pCtx[i].pExpr; + + if (strcmp(pExpr->pExpr->_function.functionName, "twa") == 0) { + SFunctParam* pParam = &pExpr->base.pParam[0]; + + SColumn c = *pParam->pCol; + taosArrayPush(pInfo->pInterpCols, &c); + needed = true; + + SGroupKeys key = {0}; + key.bytes = c.bytes; + key.type = c.type; + key.isNull = false; + key.pData = taosMemoryCalloc(1, c.bytes); + taosArrayPush(pInfo->pPrevValues, &key); + } + } + + return needed; +} + 
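/* Illustrative sketch, not part of the patch: doStreamIntervalAgg() above emits according to
 * the stream trigger. STREAM_TRIGGER_AT_ONCE pushes every updated window immediately, while
 * the window-close trigger defers emission until the watermark has closed the window, which
 * is why pClosed is merged into pUpdated only in that mode. A hypothetical condensation: */
#include <stdbool.h>

typedef enum { TRIGGER_AT_ONCE_SKETCH, TRIGGER_WINDOW_CLOSE_SKETCH } ETriggerSketch;

static bool shouldEmitWindow(ETriggerSketch trigger, bool windowClosed) {
  /* AT_ONCE: emit on every update; WINDOW_CLOSE: emit only after the watermark passes */
  return trigger == TRIGGER_AT_ONCE_SKETCH || windowClosed;
}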
SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, - STimeWindowAggSupp* pTwAggSupp, const STableGroupInfo* pTableGroupInfo, - SExecTaskInfo* pTaskInfo) { + STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) { SIntervalAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SIntervalAggOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { goto _error; } - pInfo->order = TSDB_ORDER_ASC; - pInfo->interval = *pInterval; + pInfo->win = pTaskInfo->window; + pInfo->order = TSDB_ORDER_ASC; + pInfo->interval = *pInterval; pInfo->execModel = pTaskInfo->execModel; - pInfo->win = pTaskInfo->window; - pInfo->twAggSup = *pTwAggSupp; + pInfo->twAggSup = *pTwAggSupp; + pInfo->primaryTsIndex = primaryTsSlotId; size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; @@ -1180,7 +1413,14 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, pResBlock, keyBufSize, pTaskInfo->id.str); initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pInfo->win); + pInfo->invertible = allInvertible(pInfo->binfo.pCtx, numOfCols); + pInfo->invertible = false; // Todo(liuyao): Dependent TSDB API + + pInfo->timeWindowInterpo = timeWindowinterpNeeded(pInfo->binfo.pCtx, numOfCols, pInfo); + if (pInfo->timeWindowInterpo) { + pInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SResultRowPosition)); + } // pInfo->pTableQueryInfo = initTableQueryInfo(pTableGroupInfo); if (code != TSDB_CODE_SUCCESS /* || pInfo->pTableQueryInfo == NULL*/) { @@ -1189,14 +1429,14 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* initResultRowInfo(&pInfo->binfo.resultRowInfo, (int32_t)1); - pOperator->name = "TimeIntervalAggOperator"; + pOperator->name = "TimeIntervalAggOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_INTERVAL; - pOperator->blocking = true; - pOperator->status = OP_NOT_OPENED; - pOperator->pExpr = pExprInfo; - pOperator->pTaskInfo = pTaskInfo; - pOperator->numOfExprs = numOfCols; - pOperator->info = pInfo; + pOperator->blocking = true; + pOperator->status = OP_NOT_OPENED; + pOperator->pExpr = pExprInfo; + pOperator->pTaskInfo = pTaskInfo; + pOperator->numOfExprs = numOfCols; + pOperator->info = pInfo; pOperator->fpSet = createOperatorFpSet(doOpenIntervalAgg, doBuildIntervalResult, doStreamIntervalAgg, NULL, destroyIntervalOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL); @@ -1218,32 +1458,38 @@ _error: SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, - STimeWindowAggSupp* pTwAggSupp, const STableGroupInfo* pTableGroupInfo, - SExecTaskInfo* pTaskInfo) { + STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) { SStreamFinalIntervalOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamFinalIntervalOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { goto _error; } - pInfo->order = TSDB_ORDER_ASC; pInfo->interval = *pInterval; pInfo->twAggSup = *pTwAggSupp; pInfo->primaryTsIndex = primaryTsSlotId; - size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; initResultSizeInfo(pOperator, 4096); - int32_t code = 
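/* Illustrative sketch, not part of the patch: timeWindowinterpNeeded() above enables
 * boundary interpolation only when some output expression is a twa() call, and registers the
 * columns whose previous values must be cached across blocks in pPrevValues. The shape of
 * that scan, with hypothetical types: */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

typedef struct { const char* functionName; } SExprSketch;

static bool needsBoundaryInterp(const SExprSketch* exprs, int32_t num) {
  for (int32_t i = 0; i < num; ++i) {
    if (strcmp(exprs[i].functionName, "twa") == 0) {
      return true;  // time-weighted average needs values interpolated at window borders
    }
  }
  return false;
}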
initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, pResBlock, keyBufSize, pTaskInfo->id.str); - initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); if (code != TSDB_CODE_SUCCESS) { goto _error; } - initResultRowInfo(&pInfo->binfo.resultRowInfo, (int32_t)1); + int32_t numOfChild = 8;// Todo(liuyao) get it from phy plan + pInfo->pChildren = taosArrayInit(numOfChild, sizeof(SOperatorInfo)); + for (int32_t i = 0; i < numOfChild; i++) { + SSDataBlock* chRes = createOneDataBlock(pResBlock, false); + SOperatorInfo* pChildOp = createIntervalOperatorInfo(NULL, pExprInfo, numOfCols, + chRes, pInterval, primaryTsSlotId, pTwAggSupp, pTaskInfo); + if (pChildOp && chRes) { + taosArrayPush(pInfo->pChildren, &pChildOp); + continue; + } + goto _error; + } pOperator->name = "StreamFinalIntervalOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL; @@ -1275,8 +1521,7 @@ _error: SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, - STimeWindowAggSupp* pTwAggSupp, const STableGroupInfo* pTableGroupInfo, - SExecTaskInfo* pTaskInfo) { + STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) { SIntervalAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SIntervalAggOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -1298,8 +1543,7 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExpr initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, pResBlock, keyBufSize, pTaskInfo->id.str); initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pInfo->win); - // pInfo->pTableQueryInfo = initTableQueryInfo(pTableGroupInfo); - if (code != TSDB_CODE_SUCCESS /* || pInfo->pTableQueryInfo == NULL*/) { + if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -1421,13 +1665,13 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) { return pBInfo->pRes; } - int32_t order = TSDB_ORDER_ASC; + int64_t st = taosGetTimestampUs(); + int32_t order = TSDB_ORDER_ASC; + SOperatorInfo* downstream = pOperator->pDownstream[0]; while (1) { - publishOperatorProfEvent(downstream, QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); - publishOperatorProfEvent(downstream, QUERY_PROF_AFTER_OPERATOR_EXEC); if (pBlock == NULL) { break; } @@ -1439,6 +1683,8 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) { doSessionWindowAggImpl(pOperator, pInfo, pBlock); } + pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0; + // restore the value pOperator->status = OP_RES_TO_RETURN; closeAllResultRows(&pBInfo->resultRowInfo); @@ -1450,7 +1696,10 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) { doSetOperatorCompleted(pOperator); } - return pBInfo->pRes->info.rows == 0 ? NULL : pBInfo->pRes; + size_t rows = pBInfo->pRes->info.rows; + pOperator->resultInfo.totalRows += rows; + + return (rows == 0)? 
NULL : pBInfo->pRes; } static SSDataBlock* doAllIntervalAgg(SOperatorInfo* pOperator) { @@ -1472,9 +1721,7 @@ static SSDataBlock* doAllIntervalAgg(SOperatorInfo* pOperator) { SOperatorInfo* downstream = pOperator->pDownstream[0]; while (1) { - publishOperatorProfEvent(downstream, QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); - publishOperatorProfEvent(downstream, QUERY_PROF_AFTER_OPERATOR_EXEC); if (pBlock == NULL) { break; } @@ -1637,9 +1884,10 @@ _error: return NULL; } -static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pSDataBlock, +static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock, int32_t tableGroupId) { SStreamFinalIntervalOperatorInfo* pInfo = (SStreamFinalIntervalOperatorInfo*)pOperatorInfo->info; + SResultRowInfo* pResultRowInfo = &(pInfo->binfo.resultRowInfo); SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo; int32_t numOfOutput = pOperatorInfo->numOfExprs; SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); @@ -1647,14 +1895,17 @@ static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRes bool ascScan = true; TSKEY* tsCols = NULL; SResultRow* pResult = NULL; - int32_t forwardStep = 0; + int32_t forwardRows = 0; if (pSDataBlock->pDataBlock != NULL) { SColumnInfoData* pColDataInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex); tsCols = (int64_t*)pColDataInfo->pData; + } else { + return pUpdated; } + int32_t startPos = ascScan ? 0 : (pSDataBlock->info.rows - 1); - TSKEY ts = getStartTsKey(&pSDataBlock->info.window, tsCols, pSDataBlock->info.rows, ascScan); + TSKEY ts = getStartTsKey(&pSDataBlock->info.window, tsCols); STimeWindow nextWin = getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, pInfo->interval.precision, NULL); while (1) { @@ -1669,15 +1920,15 @@ static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRes pos->pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset}; *(int64_t*)pos->key = pResult->win.skey; taosArrayPush(pUpdated, &pos); - forwardStep = + forwardRows = getNumOfRowsInTimeWindow(&pSDataBlock->info, tsCols, startPos, nextWin.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC); // window start(end) key interpolation - doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->binfo.pCtx, pResult, &nextWin, startPos, forwardStep, - pInfo->order, false); + // disable it temporarily +// doWindowBorderInterpolation(pInfo, pSDataBlock, numOfOutput, pInfo->binfo.pCtx, pResult, &nextWin, startPos, forwardRows); updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true); - doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &nextWin, &pInfo->twAggSup.timeWindowData, startPos, forwardStep, tsCols, + doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &nextWin, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, tsCols, pSDataBlock->info.rows, numOfOutput, TSDB_ORDER_ASC); - int32_t prevEndPos = (forwardStep - 1) * step + startPos; + int32_t prevEndPos = (forwardRows - 1) * step + startPos; startPos = getNextQualifiedWindow(&pInfo->interval, &nextWin, &pSDataBlock->info, tsCols, prevEndPos, pInfo->order); if (startPos < 0) { break; @@ -1686,6 +1937,51 @@ static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRes return pUpdated; } +bool isFinalInterval(SStreamFinalIntervalOperatorInfo* pInfo) { + return pInfo->pChildren != NULL; +} + +void compactFunctions(SqlFunctionCtx* pDestCtx, 
SqlFunctionCtx* pSourceCtx, + int32_t numOfOutput, SExecTaskInfo* pTaskInfo) { + for (int32_t k = 0; k < numOfOutput; ++k) { + if (fmIsWindowPseudoColumnFunc(pDestCtx[k].functionId)) { + continue; + } + int32_t code = TSDB_CODE_SUCCESS; + if (functionNeedToExecute(&pDestCtx[k]) && pDestCtx[k].fpSet.combine != NULL) { + code = pDestCtx[k].fpSet.combine(&pDestCtx[k], &pSourceCtx[k]); + if (code != TSDB_CODE_SUCCESS) { + qError("%s apply functions error, code: %s", GET_TASKID(pTaskInfo), tstrerror(code)); + pTaskInfo->code = code; + longjmp(pTaskInfo->env, code); + } + } + } +} + +static void rebuildIntervalWindow(SStreamFinalIntervalOperatorInfo* pInfo, SArray *pWinArray, + int32_t groupId, int32_t numOfOutput, SExecTaskInfo* pTaskInfo) { + int32_t size = taosArrayGetSize(pWinArray); + ASSERT(pInfo->pChildren); + for (int32_t i = 0; i < size; i++) { + STimeWindow* pParentWin = taosArrayGet(pWinArray, i); + SResultRow* pCurResult = NULL; + setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, pParentWin, true, &pCurResult, 0, + pInfo->binfo.pCtx, numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->aggSup, + pTaskInfo); + int32_t numOfChildren = taosArrayGetSize(pInfo->pChildren); + for (int32_t j = 0; j < numOfChildren; j++) { + SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, j); + SIntervalAggOperatorInfo* pChInfo = pChildOp->info; + SResultRow* pChResult = NULL; + setTimeWindowOutputBuf(&pChInfo->binfo.resultRowInfo, pParentWin, true, &pChResult, + 0, pChInfo->binfo.pCtx, pChildOp->numOfExprs, pChInfo->binfo.rowCellInfoOffset, + &pChInfo->aggSup, pTaskInfo); + compactFunctions(pInfo->binfo.pCtx, pChInfo->binfo.pCtx, numOfOutput, pTaskInfo); + } + } +} + static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { SStreamFinalIntervalOperatorInfo* pInfo = pOperator->info; SOperatorInfo* downstream = pOperator->pDownstream[0]; @@ -1702,19 +1998,34 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { } while (1) { - publishOperatorProfEvent(downstream, QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); - publishOperatorProfEvent(downstream, QUERY_PROF_AFTER_OPERATOR_EXEC); if (pBlock == NULL) { break; } + setInputDataBlock(pOperator, pInfo->binfo.pCtx, pBlock, pInfo->order, MAIN_SCAN, true); if (pBlock->info.type == STREAM_REPROCESS) { + SArray *pUpWins = taosArrayInit(8, sizeof(STimeWindow)); doClearWindows(&pInfo->aggSup, &pInfo->binfo, &pInfo->interval, - pInfo->primaryTsIndex, pOperator->numOfExprs, pBlock); + pInfo->primaryTsIndex, pOperator->numOfExprs, pBlock, pUpWins); + if (isFinalInterval(pInfo)) { + int32_t childIndex = 0; //Todo(liuyao) get child id from SSDataBlock + SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex); + SIntervalAggOperatorInfo* pChildInfo = pChildOp->info; + doClearWindows(&pChildInfo->aggSup, &pChildInfo->binfo, &pChildInfo->interval, + pChildInfo->primaryTsIndex, pChildOp->numOfExprs, pBlock, NULL); + rebuildIntervalWindow(pInfo, pUpWins, pInfo->binfo.pRes->info.groupId, + pOperator->numOfExprs, pOperator->pTaskInfo); + } + taosArrayDestroy(pUpWins); continue; } - pUpdated = doHashInterval(pOperator, &pInfo->binfo.resultRowInfo, pBlock, 0); + if (isFinalInterval(pInfo)) { + int32_t chIndex = 1; //Todo(liuyao) get it from SSDataBlock + SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, chIndex); + doStreamIntervalAgg(pChildOp); + } + pUpdated = doHashInterval(pOperator, pBlock, 0); } finalizeUpdatedResult(pOperator->numOfExprs, 
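/* Illustrative sketch, not part of the patch: compactFunctions() above merges a child
 * operator's partial aggregation state into the parent through each function's
 * fpSet.combine. For an average, combining partial states means adding sums and row counts,
 * e.g.: */
#include <stdint.h>

typedef struct { double sum; int64_t count; } SAvgStateSketch;

static void avgCombine(SAvgStateSketch* pDest, const SAvgStateSketch* pSrc) {
  pDest->sum   += pSrc->sum;    // partial sums add up
  pDest->count += pSrc->count;  // so do the row counts
}
/* the final value, pDest->sum / pDest->count, is computed once after all children merge */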
pInfo->aggSup.pResultBuf, pUpdated, pInfo->binfo.rowCellInfoOffset); @@ -1724,3 +2035,603 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { pOperator->status = OP_RES_TO_RETURN; return pInfo->binfo.pRes->info.rows == 0 ? NULL : pInfo->binfo.pRes; } + +void destroyStreamAggSupporter(SStreamAggSupporter* pSup) { + taosArrayDestroy(pSup->pResultRows); + taosMemoryFreeClear(pSup->pKeyBuf); + destroyDiskbasedBuf(pSup->pResultBuf); +} + +void destroyStreamSessionAggOperatorInfo(void* param, int32_t numOfOutput) { + SStreamSessionAggOperatorInfo* pInfo = (SStreamSessionAggOperatorInfo*)param; + doDestroyBasicInfo(&pInfo->binfo, numOfOutput); + destroyStreamAggSupporter(&pInfo->streamAggSup); + cleanupGroupResInfo(&pInfo->groupResInfo); + if (pInfo->pChildren != NULL) { + int32_t size = taosArrayGetSize(pInfo->pChildren); + for (int32_t i = 0; i < size; i++) { + SOperatorInfo *pChild = taosArrayGetP(pInfo->pChildren, i); + SStreamSessionAggOperatorInfo* pChInfo = pChild->info; + destroyStreamSessionAggOperatorInfo(pChInfo, numOfOutput); + taosMemoryFreeClear(pChild); + taosMemoryFreeClear(pChInfo); + } + } +} + +int32_t initBiasicInfo(SOptrBasicInfo* pBasicInfo, SExprInfo* pExprInfo, + int32_t numOfCols, SSDataBlock* pResultBlock, SDiskbasedBuf* pResultBuf) { + pBasicInfo->pCtx = createSqlFunctionCtx(pExprInfo, numOfCols, &pBasicInfo->rowCellInfoOffset); + pBasicInfo->pRes = pResultBlock; + for (int32_t i = 0; i < numOfCols; ++i) { + pBasicInfo->pCtx[i].pBuf = pResultBuf; + } + return TSDB_CODE_SUCCESS; +} + +void initDummyFunction(SqlFunctionCtx* pDummy, SqlFunctionCtx* pCtx, int32_t nums) { + for (int i = 0; i < nums; i++) { + pDummy[i].functionId = pCtx[i].functionId; + } +} +void initDownStream(SOperatorInfo* downstream, SStreamSessionAggOperatorInfo* pInfo) { + ASSERT(downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN); + SStreamBlockScanInfo* pScanInfo = downstream->info; + pScanInfo->sessionSup = + (SessionWindowSupporter){.pStreamAggSup = &pInfo->streamAggSup, .gap = pInfo->gap}; + pScanInfo->pUpdateInfo = updateInfoInit(60000, TSDB_TIME_PRECISION_MILLI, 60000 * 60 * 6); +} + +SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, + SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, int64_t gap, + int32_t tsSlotId, STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) { + int32_t code = TSDB_CODE_OUT_OF_MEMORY; + SStreamSessionAggOperatorInfo* pInfo = + taosMemoryCalloc(1, sizeof(SStreamSessionAggOperatorInfo)); + SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); + if (pInfo == NULL || pOperator == NULL) { + goto _error; + } + + initResultSizeInfo(pOperator, 4096); + + code = initStreamAggSupporter(&pInfo->streamAggSup, "StreamSessionAggOperatorInfo"); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + + code = initBiasicInfo(&pInfo->binfo, pExprInfo, numOfCols, pResBlock, + pInfo->streamAggSup.pResultBuf); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + pInfo->streamAggSup.resultRowSize = getResultRowSize(pInfo->binfo.pCtx, numOfCols); + + pInfo->pDummyCtx = (SqlFunctionCtx*)taosMemoryCalloc(numOfCols, sizeof(SqlFunctionCtx)); + if (pInfo->pDummyCtx == NULL) { + goto _error; + } + initDummyFunction(pInfo->pDummyCtx, pInfo->binfo.pCtx, numOfCols); + + pInfo->twAggSup = *pTwAggSupp; + initResultRowInfo(&pInfo->binfo.resultRowInfo, 8); + initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); + + pInfo->primaryTsIndex = tsSlotId; + pInfo->gap = gap; + 
pInfo->binfo.pRes = pResBlock; + pInfo->order = TSDB_ORDER_ASC; + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + pInfo->pStDeleted = taosHashInit(64, hashFn, true, HASH_NO_LOCK); + pInfo->pDelIterator = NULL; + pInfo->pDelRes = createOneDataBlock(pResBlock, false); + blockDataEnsureCapacity(pInfo->pDelRes, 64); + pInfo->pChildren = NULL; + + pOperator->name = "StreamSessionWindowAggOperator"; + pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW; + pOperator->blocking = true; + pOperator->status = OP_NOT_OPENED; + pOperator->pExpr = pExprInfo; + pOperator->numOfExprs = numOfCols; + pOperator->info = pInfo; + pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamSessionWindowAgg, + NULL, NULL, destroyStreamSessionAggOperatorInfo, aggEncodeResultRow, + aggDecodeResultRow, NULL); + pOperator->pTaskInfo = pTaskInfo; + initDownStream(downstream, pInfo); + code = appendDownstream(pOperator, &downstream, 1); + return pOperator; + +_error: + if (pInfo != NULL) { + destroyStreamSessionAggOperatorInfo(pInfo, numOfCols); + } + + taosMemoryFreeClear(pInfo); + taosMemoryFreeClear(pOperator); + pTaskInfo->code = code; + return NULL; +} + +int64_t getSessionWindowEndkey(void* data, int32_t index) { + SArray* pWinInfos = (SArray*) data; + SResultWindowInfo* pWin = taosArrayGet(pWinInfos, index); + return pWin->win.ekey; +} +static bool isInWindow(SResultWindowInfo* pWin, TSKEY ts, int64_t gap) { + int64_t sGap = ts - pWin->win.skey; + int64_t eGap = pWin->win.ekey - ts; + if ( (sGap < 0 && sGap >= -gap) || (eGap < 0 && eGap >= -gap) || (sGap >= 0 && eGap >= 0) ) { + return true; + } + return false; +} + +static SResultWindowInfo* insertNewSessionWindow(SArray* pWinInfos, TSKEY ts, + int32_t index) { + SResultWindowInfo win = + {.pos.offset = -1, .pos.pageId = -1, .win.skey = ts, .win.ekey = ts, .isOutput = false}; + return taosArrayInsert(pWinInfos, index, &win); +} + +static SResultWindowInfo* addNewSessionWindow(SArray* pWinInfos, TSKEY ts) { + SResultWindowInfo win = + {.pos.offset = -1, .pos.pageId = -1, .win.skey = ts, .win.ekey = ts, .isOutput = false}; + return taosArrayPush(pWinInfos, &win); +} + +SResultWindowInfo* getSessionTimeWindow(SArray* pWinInfos, TSKEY ts, int64_t gap, + int32_t* pIndex) { + int32_t size = taosArrayGetSize(pWinInfos); + if (size == 0) { + return addNewSessionWindow(pWinInfos, ts); + } + // find the first position which is smaller than the key + int32_t index = binarySearch(pWinInfos, size, ts, TSDB_ORDER_DESC, + getSessionWindowEndkey); + SResultWindowInfo* pWin = NULL; + if (index >= 0) { + pWin = taosArrayGet(pWinInfos, index); + if (isInWindow(pWin, ts, gap)) { + *pIndex = index; + return pWin; + } + } + + if (index + 1 < size) { + pWin = taosArrayGet(pWinInfos, index + 1); + if (isInWindow(pWin, ts, gap)) { + *pIndex = index + 1; + return pWin; + } + } + + if (index == size - 1) { + *pIndex = taosArrayGetSize(pWinInfos); + return addNewSessionWindow(pWinInfos, ts); + } + *pIndex = index; + return insertNewSessionWindow(pWinInfos, ts, index); +} + +int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pTs, int32_t rows, + int32_t start, int64_t gap, SHashObj* pStDeleted) { + for (int32_t i = start; i < rows; ++i) { + if (!isInWindow(pWinInfo, pTs[i], gap)) { + return i - start; + } + if (pWinInfo->win.skey > pTs[i]) { + if (pStDeleted && pWinInfo->isOutput) { + taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &pWinInfo->win.skey, sizeof(TSKEY)); + pWinInfo->isOutput = false; + 
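/* Illustrative sketch, not part of the patch: isInWindow() above treats a timestamp as
 * belonging to a session if it lies inside the window or within `gap` of either edge; its
 * three-branch test is equivalent to one closed interval. Self-contained reformulation: */
#include <stdbool.h>
#include <stdint.h>

static bool inSessionWindow(int64_t skey, int64_t ekey, int64_t ts, int64_t gap) {
  return ts >= skey - gap && ts <= ekey + gap;
}
/* e.g. a session [100, 180] with gap = 30 absorbs ts = 75 and ts = 205, but not ts = 65 */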
} + pWinInfo->win.skey = pTs[i]; + } + pWinInfo->win.ekey = TMAX(pWinInfo->win.ekey, pTs[i]); + } + return rows - start; +} + +static int32_t setWindowOutputBuf(SResultWindowInfo* pWinInfo, SResultRow** pResult, + SqlFunctionCtx* pCtx, int32_t groupId, int32_t numOfOutput, + int32_t* rowCellInfoOffset, SStreamAggSupporter* pAggSup, SExecTaskInfo* pTaskInfo) { + assert(pWinInfo->win.skey <= pWinInfo->win.ekey); + // too many time windows in the query + int32_t size = taosArrayGetSize(pAggSup->pResultRows); + if (size > MAX_INTERVAL_TIME_WINDOW) { + longjmp(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW); + } + + if (pWinInfo->pos.pageId == -1) { + *pResult = getNewResultRow(pAggSup->pResultBuf, groupId, pAggSup->resultRowSize); + if (*pResult == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + initResultRow(*pResult); + + // add a new result set for a new group + pWinInfo->pos.pageId = (*pResult)->pageId; + pWinInfo->pos.offset = (*pResult)->offset; + } else { + *pResult = getResultRowByPos(pAggSup->pResultBuf, &pWinInfo->pos); + if (!(*pResult)) { + qError("getResultRowByPos return NULL, TID:%s", GET_TASKID(pTaskInfo)); + return TSDB_CODE_FAILED; + } + } + + // set time window for current result + (*pResult)->win = pWinInfo->win; + setResultRowInitCtx(*pResult, pCtx, numOfOutput, rowCellInfoOffset); + return TSDB_CODE_SUCCESS; +} + +static int32_t doOneWindowAgg(SStreamSessionAggOperatorInfo* pInfo, + SSDataBlock* pSDataBlock, SResultWindowInfo* pCurWin, SResultRow** pResult, + int32_t startIndex, int32_t winRows, int32_t numOutput, SExecTaskInfo* pTaskInfo) { + SColumnInfoData* pColDataInfo = + taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex); + TSKEY* tsCols = (int64_t*)pColDataInfo->pData; + int32_t code = setWindowOutputBuf(pCurWin, pResult, pInfo->binfo.pCtx, pSDataBlock->info.groupId, + numOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->streamAggSup, pTaskInfo); + if (code != TSDB_CODE_SUCCESS || (*pResult) == NULL) { + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } + updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pCurWin->win, true); + doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &pCurWin->win, + &pInfo->twAggSup.timeWindowData, startIndex, winRows, tsCols, pSDataBlock->info.rows, + numOutput, TSDB_ORDER_ASC); + return TSDB_CODE_SUCCESS; +} + +int32_t copyWinInfoToDataBlock(SSDataBlock* pBlock, SStreamAggSupporter* pAggSup, + int32_t start, int32_t num, int32_t numOfExprs, SOptrBasicInfo* pBinfo) { + for (int32_t i = start; i < num; i += 1) { + SResultWindowInfo* pWinInfo = taosArrayGet(pAggSup->pResultRows, i); + SFilePage* bufPage = getBufPage(pAggSup->pResultBuf, pWinInfo->pos.pageId); + SResultRow* pRow = (SResultRow*)((char*)bufPage + pWinInfo->pos.offset); + for (int32_t j = 0; j < numOfExprs; ++j) { + SResultRowEntryInfo* pResultInfo = getResultCell(pRow, j, pBinfo->rowCellInfoOffset); + SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, j); + char* in = GET_ROWCELL_INTERBUF(pBinfo->pCtx[j].resultInfo); + colDataAppend(pColInfoData, pBlock->info.rows, in, pResultInfo->isNullRes); + } + pBlock->info.rows += pRow->numOfRows; + releaseBufPage(pAggSup->pResultBuf, bufPage); + } + blockDataUpdateTsWindow(pBlock, -1); + return TSDB_CODE_SUCCESS; +} + +int32_t getNumCompactWindow(SArray* pWinInfos, int32_t startIndex, int64_t gap) { + SResultWindowInfo* pCurWin = taosArrayGet(pWinInfos, startIndex); + int32_t size = taosArrayGetSize(pWinInfos); + // only examine the windows that follow startIndex + for (int32_t i = startIndex + 1; i < size; i++) { + 
SResultWindowInfo* pWinInfo = taosArrayGet(pWinInfos, i); + if (!isInWindow(pCurWin, pWinInfo->win.skey, gap)) { + return i - startIndex - 1; + } + } + + return size - startIndex - 1; +} + +void compactTimeWindow(SStreamSessionAggOperatorInfo* pInfo, int32_t startIndex, int32_t num, + int32_t groupId, int32_t numOfOutput, SExecTaskInfo* pTaskInfo, SHashObj* pStUpdated, SHashObj* pStDeleted) { + SResultWindowInfo* pCurWin = taosArrayGet(pInfo->streamAggSup.pResultRows, startIndex); + SResultRow* pCurResult = NULL; + setWindowOutputBuf(pCurWin, &pCurResult, pInfo->binfo.pCtx, groupId, + numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->streamAggSup, pTaskInfo); + num += startIndex + 1; + ASSERT(num <= taosArrayGetSize(pInfo->streamAggSup.pResultRows)); + // Just look for the window behind StartIndex + for (int32_t i = startIndex + 1; i < num; i++) { + SResultWindowInfo* pWinInfo = taosArrayGet(pInfo->streamAggSup.pResultRows, i); + SResultRow* pWinResult = NULL; + setWindowOutputBuf(pWinInfo, &pWinResult, pInfo->pDummyCtx, groupId, + numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->streamAggSup, pTaskInfo); + pCurWin->win.ekey = TMAX(pCurWin->win.ekey, pWinInfo->win.ekey); + compactFunctions(pInfo->binfo.pCtx, pInfo->pDummyCtx, numOfOutput, pTaskInfo); + taosHashRemove(pStUpdated, &pWinInfo->pos, sizeof(SResultRowPosition)); + if (pWinInfo->isOutput) { + taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &pWinInfo->win.skey, sizeof(TSKEY)); + pWinInfo->isOutput = false; + } + taosArrayRemove(pInfo->streamAggSup.pResultRows, i); + } +} + +static void doStreamSessionWindowAggImpl(SOperatorInfo* pOperator, + SSDataBlock* pSDataBlock, SHashObj* pStUpdated, SHashObj* pStDeleted) { + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SStreamSessionAggOperatorInfo* pInfo = pOperator->info; + bool masterScan = true; + int32_t numOfOutput = pOperator->numOfExprs; + int64_t groupId = pSDataBlock->info.groupId; + int64_t gap = pInfo->gap; + int64_t code = TSDB_CODE_SUCCESS; + + int32_t step = 1; + bool ascScan = true; + TSKEY* tsCols = NULL; + SResultRow* pResult = NULL; + int32_t winRows = 0; + + if (pSDataBlock->pDataBlock != NULL) { + SColumnInfoData* pColDataInfo = + taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex); + tsCols = (int64_t*)pColDataInfo->pData; + } else { + return ; + } + + SStreamAggSupporter* pAggSup = &pInfo->streamAggSup; + for(int32_t i = 0; i < pSDataBlock->info.rows; ) { + int32_t winIndex = 0; + SResultWindowInfo* pCurWin = + getSessionTimeWindow(pAggSup->pResultRows, tsCols[i], gap, &winIndex); + winRows = + updateSessionWindowInfo(pCurWin, tsCols, pSDataBlock->info.rows, i, pInfo->gap, pStDeleted); + code = doOneWindowAgg(pInfo, pSDataBlock, pCurWin, &pResult, i, winRows, numOfOutput, pTaskInfo); + if (code != TSDB_CODE_SUCCESS || pResult == NULL) { + longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + } + // window start(end) key interpolation + // doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->binfo.pCtx, pResult, &nextWin, startPos, forwardRows, + // pInfo->order, false); + int32_t winNum = getNumCompactWindow(pAggSup->pResultRows, winIndex, gap); + if (winNum > 0) { + compactTimeWindow(pInfo, winIndex, winNum, groupId, numOfOutput, pTaskInfo, pStUpdated, pStDeleted); + } + pCurWin->isClosed = false; + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) { + code = taosHashPut(pStUpdated, &pCurWin->pos, sizeof(SResultRowPosition), &(pCurWin->win.skey), sizeof(TSKEY)); + if (code != TSDB_CODE_SUCCESS) { + 
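/* Illustrative sketch, not part of the patch: compactTimeWindow() above folds session
 * windows that a new batch has bridged into the first one: the start window's ekey is
 * extended, the aggregation states are combined, and absorbed windows are queued for
 * retraction via pStDeleted. The window arithmetic alone, with hypothetical types: */
#include <stdint.h>

typedef struct { int64_t skey; int64_t ekey; } SSessionWinSketch;

/* fold wins[start+1 .. start+num] into wins[start]; returns the widened window */
static SSessionWinSketch mergeSessionWindows(const SSessionWinSketch* wins, int32_t start,
                                             int32_t num) {
  SSessionWinSketch merged = wins[start];
  for (int32_t i = 1; i <= num; ++i) {
    if (wins[start + i].ekey > merged.ekey) merged.ekey = wins[start + i].ekey;
  }
  return merged;
}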
longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + } + pCurWin->isOutput = true; + } + i += winRows; + } +} + +static void doClearSessionWindows(SStreamAggSupporter* pAggSup, SOptrBasicInfo* pBinfo, + SSDataBlock* pBlock, int32_t tsIndex, int32_t numOfOutput, int64_t gap, SArray* result) { + SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, tsIndex); + TSKEY *tsCols = (TSKEY*)pColDataInfo->pData; + int32_t step = 0; + for (int32_t i = 0; i < pBlock->info.rows; i += step) { + int32_t winIndex = 0; + SResultWindowInfo* pCurWin = + getSessionTimeWindow(pAggSup->pResultRows, tsCols[i], gap, &winIndex); + step = updateSessionWindowInfo(pCurWin, tsCols, pBlock->info.rows, i, gap, NULL); + ASSERT(isInWindow(pCurWin, tsCols[i], gap)); + doClearWindowImpl(&pCurWin->pos, pAggSup->pResultBuf, pBinfo, numOfOutput); + if (result) { + taosArrayPush(result, pCurWin); + } + } +} + +static int32_t copyUpdateResult(SHashObj* pStUpdated, SArray* pUpdated, int32_t groupId) { + void* pData = NULL; + size_t keyLen = 0; + while((pData = taosHashIterate(pStUpdated, pData)) != NULL) { + void* key = taosHashGetKey(pData, &keyLen); + ASSERT(keyLen == sizeof(SResultRowPosition)); + SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); + if (pos == NULL) { + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } + pos->groupId = groupId; + pos->pos = *(SResultRowPosition*)key; + *(int64_t*)pos->key = *(uint64_t*)pData; + taosArrayPush(pUpdated, &pos); + } + return TSDB_CODE_SUCCESS; +} + +void doBuildDeleteDataBlock(SHashObj* pStDeleted, SSDataBlock* pBlock, void** Ite) { + blockDataCleanup(pBlock); + size_t keyLen = 0; + while(( (*Ite) = taosHashIterate(pStDeleted, *Ite)) != NULL) { + SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, 0); + colDataAppend(pColInfoData, pBlock->info.rows, *Ite, false); + for (int32_t i = 1; i < pBlock->info.numOfCols; i++) { + pColInfoData = taosArrayGet(pBlock->pDataBlock, i); + colDataAppendNULL(pColInfoData, pBlock->info.rows); + } + pBlock->info.rows += 1; + if (pBlock->info.rows + 1 >= pBlock->info.capacity) { + break; + } + } + if ((*Ite) == NULL) { + taosHashClear(pStDeleted); + } +} + +static void rebuildTimeWindow(SStreamSessionAggOperatorInfo* pInfo, SArray *pWinArray, + int32_t groupId, int32_t numOfOutput, SExecTaskInfo* pTaskInfo) { + int32_t size = taosArrayGetSize(pWinArray); + ASSERT(pInfo->pChildren); + for (int32_t i = 0; i < size; i++) { + SResultWindowInfo* pParentWin = taosArrayGet(pWinArray, i); + SResultRow* pCurResult = NULL; + setWindowOutputBuf(pParentWin, &pCurResult, pInfo->binfo.pCtx, groupId, + numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->streamAggSup, pTaskInfo); + int32_t numOfChildren = taosArrayGetSize(pInfo->pChildren); + for (int32_t j = 0; j < numOfChildren; j++) { + SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, j); + SStreamSessionAggOperatorInfo* pChInfo = pChild->info; + SArray* pChWins = pChInfo->streamAggSup.pResultRows; + int32_t chWinSize = taosArrayGetSize(pChWins); + int32_t index = binarySearch(pChWins, chWinSize, pParentWin->win.skey, + TSDB_ORDER_DESC, getSessionWindowEndkey); + for (int32_t k = index; k > 0 && k < chWinSize; k++) { + SResultWindowInfo* pcw = taosArrayGet(pChWins, k); + if (pParentWin->win.skey <= pcw->win.skey && pcw->win.ekey <= pParentWin->win.ekey) { + SResultRow* pChResult = NULL; + setWindowOutputBuf(pcw, &pChResult, pChInfo->binfo.pCtx, groupId, + numOfOutput, pChInfo->binfo.rowCellInfoOffset, &pChInfo->streamAggSup, pTaskInfo); + 
compactFunctions(pInfo->binfo.pCtx, pChInfo->binfo.pCtx, numOfOutput, pTaskInfo); + continue; + } + break; + } + } + } +} + +bool isFinalSession(SStreamSessionAggOperatorInfo* pInfo) { + return pInfo->pChildren != NULL; +} + +int32_t closeSessionWindow(SArray *pWins, STimeWindowAggSupp *pTwSup, SArray *pClosed, + int8_t calTrigger) { + // Todo(liuyao) save window to tdb + int32_t size = taosArrayGetSize(pWins); + for (int32_t i = 0; i < size; i++) { + SResultWindowInfo *pSeWin = taosArrayGet(pWins, i); + if (pSeWin->win.ekey < pTwSup->maxTs - pTwSup->waterMark) { + if (!pSeWin->isClosed) { + SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); + if (pos == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + pos->groupId = 0; + pos->pos = pSeWin->pos; + *(int64_t*)pos->key = pSeWin->win.ekey; + if (!taosArrayPush(pClosed, &pos)) { + taosMemoryFree(pos); + return TSDB_CODE_OUT_OF_MEMORY; + } + pSeWin->isClosed = true; + if (calTrigger == STREAM_TRIGGER__WINDOW_CLOSE) { + pSeWin->isOutput = true; + } + } + continue; + } + break; + } + return TSDB_CODE_SUCCESS; +} + +static SSDataBlock* doStreamSessionWindowAgg(SOperatorInfo* pOperator) { + if (pOperator->status == OP_EXEC_DONE) { + return NULL; + } + + SStreamSessionAggOperatorInfo* pInfo = pOperator->info; + SOptrBasicInfo* pBInfo = &pInfo->binfo; + if (pOperator->status == OP_RES_TO_RETURN) { + doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator); + if (pInfo->pDelRes->info.rows > 0) { + return pInfo->pDelRes; + } + doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, + pInfo->streamAggSup.pResultBuf); + if (pBInfo->pRes->info.rows == 0 || + !hashRemainDataInGroupInfo(&pInfo->groupResInfo)) { + doSetOperatorCompleted(pOperator); + } + return pBInfo->pRes->info.rows == 0 ? 
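/* Editor's note: closeSessionWindow() above closes a window only once event time
 * has advanced past its end by more than the watermark, so data arriving within
 * the watermark can still extend the session. A sketch of the predicate with
 * hypothetical names:
 *
 *   static bool canClose(long long winEkey, long long maxTs, long long watermark) {
 *     return winEkey < maxTs - watermark;   // lateness bound exceeded
 *   }
 *
 * Because the window array is time-ordered, the function breaks at the first
 * window that is not yet closable instead of scanning the whole array.
 */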
NULL : pBInfo->pRes; + } + + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + SHashObj* pStUpdated = taosHashInit(64, hashFn, true, HASH_NO_LOCK); + SOperatorInfo* downstream = pOperator->pDownstream[0]; + while (1) { + SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); + if (pBlock == NULL) { + break; + } + // the pDataBlock are always the same one, no need to call this again + setInputDataBlock(pOperator, pBInfo->pCtx, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true); + if (pBlock->info.type == STREAM_REPROCESS) { + SArray *pWins = taosArrayInit(16, sizeof(SResultWindowInfo)); + doClearSessionWindows(&pInfo->streamAggSup, &pInfo->binfo, pBlock, 0, + pOperator->numOfExprs, pInfo->gap, pWins); + if (isFinalSession(pInfo)) { + int32_t childIndex = 0; //Todo(liuyao) get child id from SSDataBlock + SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex); + SStreamSessionAggOperatorInfo* pChildInfo = pChildOp->info; + doClearSessionWindows(&pChildInfo->streamAggSup, &pChildInfo->binfo, pBlock, 0, + pChildOp->numOfExprs, pChildInfo->gap, NULL); + rebuildTimeWindow(pInfo, pWins, pInfo->binfo.pRes->info.groupId, pOperator->numOfExprs, pOperator->pTaskInfo); + } + taosArrayDestroy(pWins); + continue; + } + if (isFinalSession(pInfo)) { + int32_t childIndex = 0; //Todo(liuyao) get child id from SSDataBlock + SOptrBasicInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex); + doStreamSessionWindowAggImpl(pOperator, pBlock, NULL, NULL); + } + doStreamSessionWindowAggImpl(pOperator, pBlock, pStUpdated, pInfo->pStDeleted); + pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey); + } + // restore the value + pOperator->status = OP_RES_TO_RETURN; + + SArray* pClosed = taosArrayInit(16, POINTER_BYTES); + closeSessionWindow(pInfo->streamAggSup.pResultRows, &pInfo->twAggSup, pClosed, + pInfo->twAggSup.calTrigger); + SArray* pUpdated = taosArrayInit(16, POINTER_BYTES); + copyUpdateResult(pStUpdated, pUpdated, pBInfo->pRes->info.groupId); + taosHashCleanup(pStUpdated); + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER__WINDOW_CLOSE) { + taosArrayAddAll(pUpdated, pClosed); + } + + finalizeUpdatedResult(pOperator->numOfExprs, pInfo->streamAggSup.pResultBuf, pUpdated, + pInfo->binfo.rowCellInfoOffset); + initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); + blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); + doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator); + if (pInfo->pDelRes->info.rows > 0) { + return pInfo->pDelRes; + } + doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, + pInfo->streamAggSup.pResultBuf); + return pBInfo->pRes->info.rows == 0 ? 
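/* Editor's note: the trigger mode chosen here decides result visibility: AT_ONCE
 * stages every touched window in pStUpdated while rows are processed, whereas
 * WINDOW_CLOSE emits only windows the watermark has closed (pClosed merged into
 * pUpdated above). A sketch of the policy:
 *
 *   typedef enum { TRIGGER_AT_ONCE, TRIGGER_WINDOW_CLOSE } Trigger;
 *
 *   static int shouldEmit(Trigger t, int windowClosed) {
 *     return t == TRIGGER_AT_ONCE || windowClosed;
 *   }
 *
 * Two hedged observations on the final-session branch above: pChildOp is declared
 * SOptrBasicInfo* although taosArrayGetP(pInfo->pChildren, ...) yields an
 * SOperatorInfo*, and the following call passes pOperator instead of the child
 * operator, leaving the child state unused -- both worth double-checking.
 */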
NULL : pBInfo->pRes; +} + +SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream, + SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, int64_t gap, + int32_t tsSlotId, STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) { + int32_t code = TSDB_CODE_OUT_OF_MEMORY; + SStreamSessionAggOperatorInfo* pInfo = NULL; + SOperatorInfo* pOperator = createStreamSessionAggOperatorInfo(downstream, pExprInfo, + numOfCols, pResBlock, gap, tsSlotId, pTwAggSupp, pTaskInfo); + if (pOperator == NULL) { + goto _error; + } + pOperator->name = "StreamFinalSessionWindowAggOperator"; + pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION_WINDOW; + int32_t numOfChild = 1; //Todo(liuyao) get it from phy plan + pInfo = pOperator->info; + pInfo->pChildren = taosArrayInit(8, sizeof(void *)); + for (int32_t i = 0; i < numOfChild; i++) { + SOperatorInfo* pChild = createStreamSessionAggOperatorInfo(NULL, pExprInfo, + numOfCols, NULL, gap, tsSlotId, pTwAggSupp, pTaskInfo); + if (pChild == NULL) { + goto _error; + } + taosArrayPush(pInfo->pChildren, &pChild); + } + return pOperator; + +_error: + if (pInfo != NULL) { + destroyStreamSessionAggOperatorInfo(pInfo, numOfCols); + } + + taosMemoryFreeClear(pInfo); + taosMemoryFreeClear(pOperator); + pTaskInfo->code = code; + return NULL; +} diff --git a/source/libs/executor/src/tlinearhash.c b/source/libs/executor/src/tlinearhash.c index 21fd54b620574272aad02315a3f0d74ae05405a8..00a9f3ae6c8ff087a9031c7c0d70e81bb3c88504 100644 --- a/source/libs/executor/src/tlinearhash.c +++ b/source/libs/executor/src/tlinearhash.c @@ -247,7 +247,7 @@ SLHashObj* tHashInit(int32_t inMemPages, int32_t pageSize, _hash_fn_t fn, int32_ return NULL; } - int32_t code = createDiskbasedBuf(&pHashObj->pBuf, pageSize, inMemPages * pageSize, 0, "/tmp"); + int32_t code = createDiskbasedBuf(&pHashObj->pBuf, pageSize, inMemPages * pageSize, 0, TD_TMP_DIR_PATH); if (code != 0) { terrno = code; return NULL; diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c index d585988e5e6c9802ee9864d89b956ecfbb2cdf33..7581836d595b2a01e119ddbbdea24b7cd9cb6a74 100644 --- a/source/libs/executor/src/tsort.c +++ b/source/libs/executor/src/tsort.c @@ -31,20 +31,16 @@ struct STupleHandle { struct SSortHandle { int32_t type; - int32_t pageSize; int32_t numOfPages; SDiskbasedBuf *pBuf; SArray *pSortInfo; - SArray *pIndexMap; SArray *pOrderedSource; - _sort_fetch_block_fn_t fetchfp; - _sort_merge_compar_fn_t comparFn; - SMultiwayMergeTreeInfo *pMergeTree; - int64_t startTs; + int32_t loops; uint64_t sortElapsed; + int64_t startTs; uint64_t totalElapsed; int32_t sourceId; @@ -53,13 +49,15 @@ struct SSortHandle { int32_t numOfCompletedSources; bool opened; const char *idStr; - bool inMemSort; bool needAdjust; STupleHandle tupleHandle; - void *param; void (*beforeFp)(SSDataBlock* pBlock, void* param); + + _sort_fetch_block_fn_t fetchfp; + _sort_merge_compar_fn_t comparFn; + SMultiwayMergeTreeInfo *pMergeTree; }; static int32_t msortComparFn(const void *pLeft, const void *pRight, void *param); @@ -80,7 +78,7 @@ SSortHandle* tsortCreateSortHandle(SArray* pSortInfo, SArray* pIndexMap, int32_t pSortHandle->pageSize = pageSize; pSortHandle->numOfPages = numOfPages; pSortHandle->pSortInfo = pSortInfo; - pSortHandle->pIndexMap = pIndexMap; + pSortHandle->loops = 0; if (pBlock != NULL) { pSortHandle->pDataBlock = createOneDataBlock(pBlock, false); @@ -155,7 +153,7 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) { int32_t start 
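/* Editor's note: this and the following hunks replace the hard-coded "/tmp" spill
 * directory with TD_TMP_DIR_PATH so disk-based buffers work on platforms without
 * /tmp (notably Windows). A sketch of the macro idea -- the values here are
 * hypothetical; the real definition lives in the os compatibility layer:
 *
 *   #ifdef WINDOWS
 *   #define TD_TMP_DIR_PATH "C:\\Windows\\Temp\\"
 *   #else
 *   #define TD_TMP_DIR_PATH "/tmp/"
 *   #endif
 */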
= 0; if (pHandle->pBuf == NULL) { - int32_t code = createDiskbasedBuf(&pHandle->pBuf, pHandle->pageSize, pHandle->numOfPages * pHandle->pageSize, "doAddToBuf", "/tmp"); + int32_t code = createDiskbasedBuf(&pHandle->pBuf, pHandle->pageSize, pHandle->numOfPages * pHandle->pageSize, "doAddToBuf", TD_TMP_DIR_PATH); dBufSetPrintInfo(pHandle->pBuf); if (code != TSDB_CODE_SUCCESS) { return code; @@ -217,7 +215,7 @@ static int32_t sortComparInit(SMsortComparParam* cmpParam, SArray* pSources, int } else { // multi-pass internal merge sort is required if (pHandle->pBuf == NULL) { - code = createDiskbasedBuf(&pHandle->pBuf, pHandle->pageSize, pHandle->numOfPages * pHandle->pageSize, "sortComparInit", "/tmp"); + code = createDiskbasedBuf(&pHandle->pBuf, pHandle->pageSize, pHandle->numOfPages * pHandle->pageSize, "sortComparInit", TD_TMP_DIR_PATH); dBufSetPrintInfo(pHandle->pBuf); if (code != TSDB_CODE_SUCCESS) { return code; @@ -415,6 +413,9 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) { int32_t numOfRows = blockDataGetCapacityInRow(pHandle->pDataBlock, pHandle->pageSize); blockDataEnsureCapacity(pHandle->pDataBlock, numOfRows); + // the initial pass + sortPass + final mergePass + pHandle->loops = sortPass + 2; + size_t numOfSorted = taosArrayGetSize(pHandle->pOrderedSource); for(int32_t t = 0; t < sortPass; ++t) { int64_t st = taosGetTimestampUs(); @@ -502,12 +503,13 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) { return 0; } -static int32_t createInitialSortedMultiSources(SSortHandle* pHandle) { +static int32_t createInitialSources(SSortHandle* pHandle) { size_t sortBufSize = pHandle->numOfPages * pHandle->pageSize; if (pHandle->type == SORT_SINGLESOURCE_SORT) { SSortSource* source = taosArrayGetP(pHandle->pOrderedSource, 0); taosArrayClear(pHandle->pOrderedSource); + while (1) { SSDataBlock* pBlock = pHandle->fetchfp(source->param); if (pBlock == NULL) { @@ -524,6 +526,7 @@ static int32_t createInitialSortedMultiSources(SSortHandle* pHandle) { } else { pHandle->pageSize = 4096; } + // todo!! pHandle->numOfPages = 1024; sortBufSize = pHandle->numOfPages * pHandle->pageSize; @@ -535,7 +538,7 @@ static int32_t createInitialSortedMultiSources(SSortHandle* pHandle) { } // todo relocate the columns - int32_t code = blockDataMerge(pHandle->pDataBlock, pBlock, pHandle->pIndexMap); + int32_t code = blockDataMerge(pHandle->pDataBlock, pBlock); if (code != 0) { return code; } @@ -569,6 +572,7 @@ static int32_t createInitialSortedMultiSources(SSortHandle* pHandle) { pHandle->cmpParam.numOfSources = 1; pHandle->inMemSort = true; + pHandle->loops = 1; pHandle->tupleHandle.rowIndex = -1; pHandle->tupleHandle.pBlock = pHandle->pDataBlock; return 0; @@ -592,7 +596,7 @@ int32_t tsortOpen(SSortHandle* pHandle) { pHandle->opened = true; - int32_t code = createInitialSortedMultiSources(pHandle); + int32_t code = createInitialSources(pHandle); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -692,3 +696,20 @@ void* tsortGetValue(STupleHandle* pVHandle, int32_t colIndex) { SColumnInfoData* pColInfo = TARRAY_GET_ELEM(pVHandle->pBlock->pDataBlock, colIndex); return colDataGetData(pColInfo, pVHandle->rowIndex); } + +SSortExecInfo tsortGetSortExecInfo(SSortHandle* pHandle) { + SSortExecInfo info = {0}; + + info.sortBuffer = pHandle->pageSize * pHandle->numOfPages; + info.sortMethod = pHandle->inMemSort? 
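/* Editor's note: the new `loops` field reported by tsortGetSortExecInfo() counts
 * passes over the data: 1 when the whole input fits in memory (plain qsort),
 * otherwise the initial run-building pass, `sortPass` internal merge rounds, and
 * the final merge -- hence sortPass + 2 above. A sketch of the round arithmetic,
 * assuming `runs` initial sorted runs and a merge fan-in of `fanIn`:
 *
 *   static int mergeRounds(int runs, int fanIn) {
 *     int rounds = 0;
 *     while (runs > fanIn) {                // each round merges fanIn runs into one
 *       runs = (runs + fanIn - 1) / fanIn;  // ceil division
 *       rounds++;
 *     }
 *     return rounds;                        // plays the role of sortPass
 *   }
 */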
SORT_QSORT_T:SORT_SPILLED_MERGE_SORT_T; + info.loops = pHandle->loops; + + if (pHandle->pBuf != NULL) { + SDiskbasedBufStatis st = getDBufStatis(pHandle->pBuf); + info.writeBytes = st.flushBytes; + info.readBytes = st.loadBytes; + } + + return info; +} + diff --git a/source/libs/function/CMakeLists.txt b/source/libs/function/CMakeLists.txt index 7a4cd8092205786065015252432dcb4de0a1db41..ea401e56e5c6585b93344af99280bb450137f98f 100644 --- a/source/libs/function/CMakeLists.txt +++ b/source/libs/function/CMakeLists.txt @@ -14,7 +14,7 @@ target_include_directories( target_link_libraries( function - PRIVATE os util common nodes scalar catalog qcom transport + PRIVATE os util common nodes scalar qcom transport PUBLIC uv_a ) diff --git a/source/libs/function/inc/builtins.h b/source/libs/function/inc/builtins.h index 3a753325bdffc3886af44a1f06a8a6d1a1dcd31b..3bd0f35bf5f8b29cd585ec841363b091b02211c5 100644 --- a/source/libs/function/inc/builtins.h +++ b/source/libs/function/inc/builtins.h @@ -37,6 +37,7 @@ typedef struct SBuiltinFuncDefinition { FScalarExecProcess sprocessFunc; FExecFinalize finalizeFunc; FExecProcess invertFunc; + FExecCombine combineFunc; } SBuiltinFuncDefinition; extern const SBuiltinFuncDefinition funcMgtBuiltins[]; diff --git a/source/libs/function/inc/builtinsimpl.h b/source/libs/function/inc/builtinsimpl.h index 3e2ccbc6b8fd86926f576eee274efa233a6ed95c..68b83f4a1955c72e119dcadd5d409ce10639e5e1 100644 --- a/source/libs/function/inc/builtinsimpl.h +++ b/source/libs/function/inc/builtinsimpl.h @@ -27,6 +27,7 @@ bool functionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo); int32_t functionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); int32_t dummyProcess(SqlFunctionCtx* UNUSED_PARAM(pCtx)); int32_t functionFinalizeWithResultBuf(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, char* finalResult); +int32_t combineFunction(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx); EFuncDataRequired countDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow); bool getCountFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); @@ -37,24 +38,29 @@ EFuncDataRequired statisDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWin bool getSumFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); int32_t sumFunction(SqlFunctionCtx *pCtx); int32_t sumInvertFunction(SqlFunctionCtx *pCtx); +int32_t sumCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx); bool minmaxFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo); bool getMinmaxFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); int32_t minFunction(SqlFunctionCtx* pCtx); int32_t maxFunction(SqlFunctionCtx *pCtx); int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); +int32_t minCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx); +int32_t maxCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx); bool getAvgFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); bool avgFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo); int32_t avgFunction(SqlFunctionCtx* pCtx); int32_t avgFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); int32_t avgInvertFunction(SqlFunctionCtx* pCtx); +int32_t avgCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx); bool getStddevFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); bool stddevFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo); int32_t stddevFunction(SqlFunctionCtx* pCtx); int32_t stddevFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); int32_t 
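/* Editor's note: the new combineFunc hook added to SBuiltinFuncDefinition gives
 * each aggregate a way to merge two partial states (e.g. results computed over
 * separate partitions) into one, alongside processFunc (accumulate input rows)
 * and finalizeFunc (produce the output value). A minimal sketch of the contract
 * with a count-like state, hypothetical types:
 *
 *   typedef struct { long long n; } CountState;
 *
 *   // Merge src into dst; combineFunction() later in this patch does the same
 *   // for COUNT via the row-cell interbufs of the two contexts.
 *   static int countCombine(CountState *dst, const CountState *src) {
 *     dst->n += src->n;
 *     return 0;   // TSDB_CODE_SUCCESS
 *   }
 */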
stddevInvertFunction(SqlFunctionCtx* pCtx); +int32_t stddevCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx); bool getLeastSQRFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); bool leastSQRFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo); @@ -67,6 +73,11 @@ bool percentileFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultI int32_t percentileFunction(SqlFunctionCtx *pCtx); int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); +bool getApercentileFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); +bool apercentileFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo); +int32_t apercentileFunction(SqlFunctionCtx *pCtx); +int32_t apercentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); + bool getDiffFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); bool diffFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResInfo); int32_t diffFunction(SqlFunctionCtx *pCtx); @@ -74,7 +85,9 @@ int32_t diffFunction(SqlFunctionCtx *pCtx); bool getFirstLastFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); int32_t firstFunction(SqlFunctionCtx *pCtx); int32_t lastFunction(SqlFunctionCtx *pCtx); -int32_t lastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); +int32_t firstLastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); +int32_t firstCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx); +int32_t lastCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx); bool getTopBotFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv); int32_t topFunction(SqlFunctionCtx *pCtx); @@ -127,6 +140,10 @@ bool uniqueFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo) int32_t uniqueFunction(SqlFunctionCtx *pCtx); //int32_t uniqueFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); +bool getTwaFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); +bool twaFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo); +int32_t twaFunction(SqlFunctionCtx *pCtx); +int32_t twaFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock); bool getSelectivityFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv); diff --git a/source/libs/function/inc/functionMgtInt.h b/source/libs/function/inc/functionMgtInt.h index 4d45eb91cebced879e75ce23368b7a650d119f2e..29dd0bcd90d6297ca539bad8a5c5cd78ff151d1d 100644 --- a/source/libs/function/inc/functionMgtInt.h +++ b/source/libs/function/inc/functionMgtInt.h @@ -28,7 +28,7 @@ extern "C" { #define FUNC_MGT_AGG_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(0) #define FUNC_MGT_SCALAR_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(1) -#define FUNC_MGT_NONSTANDARD_SQL_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(2) +#define FUNC_MGT_INDEFINITE_ROWS_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(2) #define FUNC_MGT_STRING_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(3) #define FUNC_MGT_DATETIME_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(4) #define FUNC_MGT_TIMELINE_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(5) @@ -44,9 +44,7 @@ extern "C" { #define FUNC_MGT_TEST_MASK(val, mask) (((val) & (mask)) != 0) -#define FUNC_UDF_ID_START 5000 -#define FUNC_AGGREGATE_UDF_ID 5001 -#define FUNC_SCALAR_UDF_ID 5002 +#define FUNC_UDF_ID_START 5000 extern const int funcMgtUdfNum; diff --git a/source/libs/function/inc/taggfunction.h b/source/libs/function/inc/taggfunction.h index d779cf50f4ce019ddcea41b71720347d54a34e96..c3d61d426d889cecda0723b48c6c26eae16316ff 100644 --- a/source/libs/function/inc/taggfunction.h +++ b/source/libs/function/inc/taggfunction.h @@ -52,13 +52,6 @@ typedef 
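/* Editor's note: the rename FUNC_MGT_NONSTANDARD_SQL_FUNC ->
 * FUNC_MGT_INDEFINITE_ROWS_FUNC names the actual property the planner cares
 * about: these functions (diff, mavg, tail, ...) may emit a different number of
 * rows than they consume. Classifications are single bits, so one function can
 * carry several at once; a sketch of the scheme:
 *
 *   #define CLASSIFICATION_MASK(n) (1ULL << (n))
 *   #define TEST_MASK(val, mask)   (((val) & (mask)) != 0)
 *
 *   // e.g. TOP/BOTTOM become AGG | SELECT | INDEFINITE_ROWS in this patch.
 */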
struct SInterpInfoDetail { int8_t primaryCol; } SInterpInfoDetail; -typedef struct STwaInfo { - int8_t hasResult; // flag to denote has value - double dOutput; - SPoint1 p; - STimeWindow win; -} STwaInfo; - bool topbot_datablock_filter(SqlFunctionCtx *pCtx, const char *minval, const char *maxval); /** diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 3e71888bf9fe3dd8e2d14cd3289f287bbd6494ed..6046450f52f9f87ba767d417d0c0095e75d3d90e 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -14,11 +14,10 @@ */ #include "builtins.h" -#include "querynodes.h" #include "builtinsimpl.h" +#include "querynodes.h" #include "scalar.h" #include "taoserror.h" -#include "tdatablock.h" static int32_t buildFuncErrMsg(char* pErrBuf, int32_t len, int32_t errCode, const char* pFormat, ...) { va_list vArgList; @@ -156,12 +155,29 @@ static int32_t translatePercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } + //param0 + SNode* pParamNode0 = nodesListGetNode(pFunc->pParameterList, 0); + if (nodeType(pParamNode0) != QUERY_NODE_COLUMN) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "The first parameter of PERCENTILE function can only be column"); + } + + //param1 + SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1); + + if (pValue->datum.i < 0 || pValue->datum.i > 100) { + return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); + } + + pValue->notReserved = true; + uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; if (!IS_NUMERIC_TYPE(para1Type) || (!IS_SIGNED_NUMERIC_TYPE(para2Type) && !IS_UNSIGNED_NUMERIC_TYPE(para2Type))) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + //set result type pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; return TSDB_CODE_SUCCESS; } @@ -175,22 +191,52 @@ static bool validAperventileAlgo(const SValueNode* pVal) { } static int32_t translateApercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t paraNum = LIST_LENGTH(pFunc->pParameterList); - if (2 != paraNum && 3 != paraNum) { + int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); + if (2 != numOfParams && 3 != numOfParams) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } + //param0 + SNode* pParamNode0 = nodesListGetNode(pFunc->pParameterList, 0); + if (nodeType(pParamNode0) != QUERY_NODE_COLUMN) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "The first parameter of APERCENTILE function can only be column"); + } + + //param1 + SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); + if (nodeType(pParamNode1) != QUERY_NODE_VALUE) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + SValueNode* pValue = (SValueNode*)pParamNode1; + if (pValue->datum.i < 0 || pValue->datum.i > 100) { + return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); + } + + pValue->notReserved = true; + uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; if (!IS_NUMERIC_TYPE(para1Type) || !IS_INTEGER_TYPE(para2Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, 
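/* Editor's note: translatePercentile() now validates at translate time that
 * param0 is a column reference and param1 a literal in [0, 100], flagging the
 * literal via notReserved. A sketch of the range check:
 *
 *   static int validPercent(long long p) {
 *     return p >= 0 && p <= 100;   // PERCENTILE(col, p)
 *   }
 *
 * One hedged observation: param1 is cast to SValueNode* before its node type is
 * verified, while translateApercentile() just above checks QUERY_NODE_VALUE
 * before the cast.
 */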
pFunc->functionName); } - if (3 == paraNum) { - SNode* pPara3 = nodesListGetNode(pFunc->pParameterList, 2); - if (QUERY_NODE_VALUE != nodeType(pPara3) || !validAperventileAlgo((SValueNode*)pPara3)) { + + //param2 + if (3 == numOfParams) { + uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type; + if (!IS_VAR_DATA_TYPE(para3Type)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + SNode* pParamNode2 = nodesListGetNode(pFunc->pParameterList, 2); + if (QUERY_NODE_VALUE != nodeType(pParamNode2) || !validAperventileAlgo((SValueNode*)pParamNode2)) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "Third parameter algorithm of apercentile must be 'default' or 't-digest'"); } + + pValue = (SValueNode*)pParamNode2; + pValue->notReserved = true; } pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; @@ -205,17 +251,31 @@ static int32_t translateTbnameColumn(SFunctionNode* pFunc, char* pErrBuf, int32_ } static int32_t translateTop(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t paraNum = LIST_LENGTH(pFunc->pParameterList); - if (2 != paraNum) { + int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); + if (2 != numOfParams) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, 1); - if (nodeType(pParamNode) != QUERY_NODE_VALUE) { + uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + if (!IS_NUMERIC_TYPE(para1Type) || !IS_INTEGER_TYPE(para2Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } - SValueNode* pValue = (SValueNode*) pParamNode; + //param0 + SNode* pParamNode0 = nodesListGetNode(pFunc->pParameterList, 0); + if (nodeType(pParamNode0) != QUERY_NODE_COLUMN) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "The first parameter of TOP/BOTTOM function can only be column"); + } + + //param1 + SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); + if (nodeType(pParamNode1) != QUERY_NODE_VALUE) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + SValueNode* pValue = (SValueNode*)pParamNode1; if (pValue->node.resType.type != TSDB_DATA_TYPE_BIGINT) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -224,6 +284,9 @@ static int32_t translateTop(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); } + pValue->notReserved = true; + + //set result type SDataType* pType = &((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType; pFunc->node.resType = (SDataType){.bytes = pType->bytes, .type = pType->type}; return TSDB_CODE_SUCCESS; @@ -248,15 +311,16 @@ static int32_t translateSpread(SFunctionNode* pFunc, char* pErrBuf, int32_t len) } static int32_t translateElapsed(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t paraNum = LIST_LENGTH(pFunc->pParameterList); - if (1 != paraNum && 2 != paraNum) { + int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); + if (1 != numOfParams && 2 != numOfParams) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0); - if (QUERY_NODE_COLUMN != nodeType(pPara)) { + // param0 + SNode* pParaNode0 = 
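/* Editor's note: APERCENTILE's optional third argument selects the estimation
 * algorithm and must be the literal 'default' or 't-digest'
 * (validAperventileAlgo -- note the typo in the name -- performs that check).
 * A sketch of such a validation:
 *
 *   #include <string.h>
 *
 *   static int validAlgo(const char *s, size_t n) {
 *     return (n == 7 && strncmp(s, "default", 7) == 0) ||
 *            (n == 8 && strncmp(s, "t-digest", 8) == 0);
 *   }
 */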
nodesListGetNode(pFunc->pParameterList, 0); + if (QUERY_NODE_COLUMN != nodeType(pParaNode0)) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "The input parameter of ELAPSED function can only be column"); + "The first parameter of ELAPSED function can only be column"); } uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; @@ -264,6 +328,28 @@ static int32_t translateElapsed(SFunctionNode* pFunc, char* pErrBuf, int32_t len return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + // param1 + if (2 == numOfParams) { + SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); + if (QUERY_NODE_VALUE != nodeType(pParamNode1)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + SValueNode* pValue = (SValueNode*)pParamNode1; + + pValue->notReserved = true; + + paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + if (!IS_INTEGER_TYPE(paraType)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + if (pValue->datum.i == 0) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "ELAPSED function time unit parameter should be greater than db precision"); + } + } + pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; return TSDB_CODE_SUCCESS; } @@ -275,6 +361,17 @@ static int32_t translateLeastSQR(SFunctionNode* pFunc, char* pErrBuf, int32_t le } for (int32_t i = 0; i < numOfParams; ++i) { + SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i); + if (i > 0) { // param1 & param2 + if (QUERY_NODE_VALUE != nodeType(pParamNode)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + SValueNode* pValue = (SValueNode*)pParamNode; + + pValue->notReserved = true; + } + uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type; if (!IS_NUMERIC_TYPE(colType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); @@ -286,15 +383,35 @@ static int32_t translateLeastSQR(SFunctionNode* pFunc, char* pErrBuf, int32_t le } static int32_t translateHistogram(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (4 != LIST_LENGTH(pFunc->pParameterList)) { + int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); + if (4 != numOfParams) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } + // param0 + SNode* pParaNode0 = nodesListGetNode(pFunc->pParameterList, 0); + if (QUERY_NODE_COLUMN != nodeType(pParaNode0)) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "The first parameter of HISTOGRAM function can only be column"); + } + uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; if (!IS_NUMERIC_TYPE(colType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + // param1 ~ param3 + for (int32_t i = 1; i < numOfParams; ++i) { + SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i); + if (QUERY_NODE_VALUE != nodeType(pParamNode)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + SValueNode* pValue = (SValueNode*)pParamNode; + + pValue->notReserved = true; + } + if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY || ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BINARY || ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type != 
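/* Editor's note: the same three-step block -- require a literal
 * (QUERY_NODE_VALUE), optionally range-check it, set notReserved -- recurs in
 * LEASTSQUARES and HISTOGRAM above and again in the state functions below. A
 * sketch of factoring it out (hypothetical helper, not part of this patch):
 *
 *   static int32_t markValueParam(SFunctionNode *pFunc, int32_t idx) {
 *     SNode *pNode = nodesListGetNode(pFunc->pParameterList, idx);
 *     if (QUERY_NODE_VALUE != nodeType(pNode)) {
 *       return -1;   // caller maps this to invaildFuncParaTypeErrMsg()
 *     }
 *     ((SValueNode *)pNode)->notReserved = true;
 *     return 0;
 *   }
 */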
TSDB_DATA_TYPE_BIGINT) { @@ -321,53 +438,93 @@ static int32_t translateHLL(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { } static int32_t translateStateCount(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (3 != LIST_LENGTH(pFunc->pParameterList)) { + int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); + if (3 != numOfParams) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } + // param0 + SNode* pParaNode0 = nodesListGetNode(pFunc->pParameterList, 0); + if (QUERY_NODE_COLUMN != nodeType(pParaNode0)) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "The input parameter of STATECOUNT function can only be column"); + } uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; if (!IS_NUMERIC_TYPE(colType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + // param1 & param2 + for (int32_t i = 1; i < numOfParams; ++i) { + SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i); + if (QUERY_NODE_VALUE != nodeType(pParamNode)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + SValueNode* pValue = (SValueNode*)pParamNode; + + pValue->notReserved = true; + } + if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY || (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BIGINT && ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_DOUBLE)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } - pFunc->node.resType = (SDataType) { .bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT }; + // set result type + pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; return TSDB_CODE_SUCCESS; } static int32_t translateStateDuration(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t paraNum = LIST_LENGTH(pFunc->pParameterList); - if (3 != paraNum && 4 != paraNum) { + int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); + if (3 != numOfParams && 4 != numOfParams) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } + // param0 + SNode* pParaNode0 = nodesListGetNode(pFunc->pParameterList, 0); + if (QUERY_NODE_COLUMN != nodeType(pParaNode0)) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "The input parameter of STATEDURATION function can only be column"); + } uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; if (!IS_NUMERIC_TYPE(colType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + // param1, param2 & param3 + for (int32_t i = 1; i < numOfParams; ++i) { + SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i); + if (QUERY_NODE_VALUE != nodeType(pParamNode)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + SValueNode* pValue = (SValueNode*)pParamNode; + + pValue->notReserved = true; + } + if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY || (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BIGINT && ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_DOUBLE)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } - if (paraNum == 4 && ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type != 
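/* Editor's note: STATECOUNT/STATEDURATION take the comparison operator as a
 * string literal (param1, BINARY) and the threshold as BIGINT or DOUBLE, which
 * is what the checks above enforce. A sketch of evaluating such a condition,
 * with a hypothetical operator set:
 *
 *   #include <string.h>
 *
 *   static int stateMatch(double v, const char *op, double thr) {
 *     if (strcmp(op, "LT") == 0) return v <  thr;
 *     if (strcmp(op, "GT") == 0) return v >  thr;
 *     if (strcmp(op, "LE") == 0) return v <= thr;
 *     if (strcmp(op, "GE") == 0) return v >= thr;
 *     if (strcmp(op, "NE") == 0) return v != thr;
 *     return strcmp(op, "EQ") == 0 && v == thr;
 *   }
 */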
TSDB_DATA_TYPE_BIGINT) { + if (numOfParams == 4 && + ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type != TSDB_DATA_TYPE_BIGINT) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } - pFunc->node.resType = (SDataType) { .bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT }; + // set result type + pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; return TSDB_CODE_SUCCESS; } static int32_t translateCsum(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return TSDB_CODE_SUCCESS; + return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0); @@ -392,7 +549,7 @@ static int32_t translateCsum(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { } } - pFunc->node.resType = (SDataType) { .bytes = tDataTypes[resType].bytes, .type = resType}; + pFunc->node.resType = (SDataType){.bytes = tDataTypes[resType].bytes, .type = resType}; return TSDB_CODE_SUCCESS; } @@ -401,13 +558,28 @@ static int32_t translateMavg(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0); - if (QUERY_NODE_COLUMN != nodeType(pPara)) { + // param0 + SNode* pParaNode0 = nodesListGetNode(pFunc->pParameterList, 0); + if (QUERY_NODE_COLUMN != nodeType(pParaNode0)) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "The input parameter of MAVG function can only be column"); + "The first parameter of MAVG function can only be column"); } uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + + // param1 + SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); + if (QUERY_NODE_VALUE != nodeType(pParamNode1)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + SValueNode* pValue = (SValueNode*)pParamNode1; + if (pValue->datum.i < 1 || pValue->datum.i > 1000) { + return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); + } + + pValue->notReserved = true; + uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; if (!IS_NUMERIC_TYPE(colType) || !IS_INTEGER_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); @@ -422,24 +594,41 @@ static int32_t translateSample(SFunctionNode* pFunc, char* pErrBuf, int32_t len) return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0); - if (QUERY_NODE_COLUMN != nodeType(pPara)) { + // param0 + SNode* pParamNode0 = nodesListGetNode(pFunc->pParameterList, 0); + if (QUERY_NODE_COLUMN != nodeType(pParamNode0)) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "The input parameter of SAMPLE function can only be column"); + "The first parameter of SAMPLE function can only be column"); + } + + SExprNode* pCol = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0); + uint8_t colType = pCol->resType.type; + + // param1 + SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); + if (QUERY_NODE_VALUE != nodeType(pParamNode1)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + SValueNode* pValue = (SValueNode*)pParamNode1; + if (pValue->datum.i < 1 || pValue->datum.i > 1000) { + return invaildFuncParaValueErrMsg(pErrBuf, len, 
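/* Editor's note: MAVG above and SAMPLE just below both clamp their integer
 * argument to [1, 1000] at translate time, rejecting absurd window or sample
 * sizes before execution. Sketch of the shared guard (hypothetical helper):
 *
 *   static int inRange(long long v, long long lo, long long hi) {
 *     return v >= lo && v <= hi;   // e.g. inRange(k, 1, 1000)
 *   }
 */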
pFunc->functionName); } + pValue->notReserved = true; + uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; if (!IS_INTEGER_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } - SExprNode* pCol = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0); - uint8_t colType = pCol->resType.type; + // set result type if (IS_VAR_DATA_TYPE(colType)) { pFunc->node.resType = (SDataType){.bytes = pCol->resType.bytes, .type = colType}; } else { pFunc->node.resType = (SDataType){.bytes = tDataTypes[colType].bytes, .type = colType}; } + return TSDB_CODE_SUCCESS; } @@ -449,21 +638,39 @@ static int32_t translateTail(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } + // param0 SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0); if (QUERY_NODE_COLUMN != nodeType(pPara)) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "The input parameter of TAIL function can only be column"); + "The first parameter of TAIL function can only be column"); } + SExprNode* pCol = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0); + uint8_t colType = pCol->resType.type; + // param1 & param2 for (int32_t i = 1; i < numOfParams; ++i) { + SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i); + if (QUERY_NODE_VALUE != nodeType(pParamNode)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + SValueNode* pValue = (SValueNode*)pParamNode; + + if (pValue->datum.i < ((i > 1) ? 0 : 1) || pValue->datum.i > 100) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "TAIL function second parameter should be in range [1, 100], " + "third parameter should be in range [0, 100]"); + } + + pValue->notReserved = true; + uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type; if (!IS_INTEGER_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } } - SExprNode* pCol = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0); - uint8_t colType = pCol->resType.type; + // set result type if (IS_VAR_DATA_TYPE(colType)) { pFunc->node.resType = (SDataType){.bytes = pCol->resType.bytes, .type = colType}; } else { @@ -500,8 +707,7 @@ static int32_t translateUnique(SFunctionNode* pFunc, char* pErrBuf, int32_t len) SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0); if (QUERY_NODE_COLUMN != nodeType(pPara)) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "The parameters of UNIQUE can only be columns"); + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "The parameters of UNIQUE can only be columns"); } pFunc->node.resType = ((SExprNode*)pPara)->resType; @@ -509,17 +715,52 @@ static int32_t translateUnique(SFunctionNode* pFunc, char* pErrBuf, int32_t len) } static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t paraLen = LIST_LENGTH(pFunc->pParameterList); - if (paraLen == 0 || paraLen > 2) { + int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); + if (numOfParams == 0 || numOfParams > 2) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - SExprNode* p1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0); - if (!IS_SIGNED_NUMERIC_TYPE(p1->resType.type) && !IS_FLOAT_TYPE(p1->resType.type) && - TSDB_DATA_TYPE_BOOL != p1->resType.type) { + //param0 + SNode* pParamNode0 = nodesListGetNode(pFunc->pParameterList, 0); + if (nodeType(pParamNode0) != 
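/* Editor's note: TAIL(col, k, offset) returns the last k rows while skipping the
 * `offset` newest ones, which is why the loop above bounds param1 to [1, 100] and
 * param2 to [0, 100] (the `(i > 1) ? 0 : 1` lower bound). A sketch of the row
 * selection on a hypothetical timestamp-ordered array:
 *
 *   // rows[0..n) ascending by ts; emit indices [begin, end)
 *   static void tailRange(int n, int k, int offset, int *begin, int *end) {
 *     *end   = n - offset;
 *     *begin = (*end - k < 0) ? 0 : *end - k;
 *   }
 */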
QUERY_NODE_COLUMN) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "The first parameter of DIFF function can only be column"); + } + + uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + if (!IS_SIGNED_NUMERIC_TYPE(colType) && !IS_FLOAT_TYPE(colType) && + TSDB_DATA_TYPE_BOOL != colType) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } - pFunc->node.resType = p1->resType; + + //param1 + if (numOfParams == 2) { + uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + if (!IS_INTEGER_TYPE(paraType)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); + if (QUERY_NODE_VALUE != nodeType(pParamNode1)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + SValueNode* pValue = (SValueNode*)pParamNode1; + if (pValue->datum.i != 0 && pValue->datum.i != 1) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "Second parameter of DIFF function should be only 0 or 1"); + } + + pValue->notReserved = true; + } + + uint8_t resType; + if (IS_SIGNED_NUMERIC_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType) { + resType = TSDB_DATA_TYPE_BIGINT; + } else { + resType = TSDB_DATA_TYPE_DOUBLE; + } + pFunc->node.resType = (SDataType){.bytes = tDataTypes[resType].bytes, .type = resType}; return TSDB_CODE_SUCCESS; } @@ -538,8 +779,8 @@ static int32_t translateLength(SFunctionNode* pFunc, char* pErrBuf, int32_t len) static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t len, int32_t minParaNum, int32_t maxParaNum, bool hasSep) { - int32_t paraNum = LIST_LENGTH(pFunc->pParameterList); - if (paraNum < minParaNum || paraNum > maxParaNum) { + int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); + if (numOfParams < minParaNum || numOfParams > maxParaNum) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } @@ -548,7 +789,7 @@ static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t int32_t sepBytes = 0; /* For concat/concat_ws function, if params have NCHAR type, promote the final result to NCHAR */ - for (int32_t i = 0; i < paraNum; ++i) { + for (int32_t i = 0; i < numOfParams; ++i) { SNode* pPara = nodesListGetNode(pFunc->pParameterList, i); uint8_t paraType = ((SExprNode*)pPara)->resType.type; if (!IS_VAR_DATA_TYPE(paraType)) { @@ -559,7 +800,7 @@ static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t } } - for (int32_t i = 0; i < paraNum; ++i) { + for (int32_t i = 0; i < numOfParams; ++i) { SNode* pPara = nodesListGetNode(pFunc->pParameterList, i); uint8_t paraType = ((SExprNode*)pPara)->resType.type; int32_t paraBytes = ((SExprNode*)pPara)->resType.bytes; @@ -575,7 +816,7 @@ static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t } if (hasSep) { - resultBytes += sepBytes * (paraNum - 3); + resultBytes += sepBytes * (numOfParams - 3); } pFunc->node.resType = (SDataType){.bytes = resultBytes, .type = resultType}; @@ -591,24 +832,37 @@ static int32_t translateConcatWs(SFunctionNode* pFunc, char* pErrBuf, int32_t le } static int32_t translateSubstr(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t paraNum = LIST_LENGTH(pFunc->pParameterList); - if (2 != paraNum && 3 != paraNum) { + int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); + if (2 != numOfParams && 3 != numOfParams) { return invaildFuncParaNumErrMsg(pErrBuf, len, 
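/* Editor's note: DIFF's result type is now derived from the input instead of
 * copied verbatim: signed integers and BOOL widen to BIGINT, floats to DOUBLE,
 * so the subtraction cannot overflow the input column's own type. A sketch of
 * the mapping with a reduced type enum:
 *
 *   typedef enum { T_BOOL, T_INT, T_BIGINT, T_FLOAT, T_DOUBLE } Ty;
 *
 *   static Ty diffResultType(Ty in) {
 *     // floats widen to DOUBLE; signed ints and bool widen to BIGINT
 *     return (in == T_FLOAT || in == T_DOUBLE) ? T_DOUBLE : T_BIGINT;
 *   }
 */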
pFunc->functionName); } - SExprNode* pPara1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0); - uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; - if (!IS_VAR_DATA_TYPE(pPara1->resType.type) || !IS_INTEGER_TYPE(para2Type)) { + SExprNode* pPara0 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0); + SExprNode* p1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 1); + + uint8_t para1Type = p1->resType.type; + if (!IS_VAR_DATA_TYPE(pPara0->resType.type) || !IS_INTEGER_TYPE(para1Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } - if (3 == paraNum) { - uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; - if (!IS_INTEGER_TYPE(para3Type)) { + + if (((SValueNode*)p1)->datum.i < 1) { + return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); + } + + if (3 == numOfParams) { + SExprNode* p2 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 2); + uint8_t para2Type = p2->resType.type; + if (!IS_INTEGER_TYPE(para2Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + + int64_t v = ((SValueNode*)p1)->datum.i; + if (v < 0 || v > INT16_MAX) { + return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); + } } - pFunc->node.resType = (SDataType){.bytes = pPara1->resType.bytes, .type = pPara1->resType.type}; + pFunc->node.resType = (SDataType){.bytes = pPara0->resType.bytes, .type = pPara0->resType.type}; return TSDB_CODE_SUCCESS; } @@ -626,9 +880,14 @@ static int32_t translateCast(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { (para2Type == TSDB_DATA_TYPE_BINARY && para1Type == TSDB_DATA_TYPE_NCHAR)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + int32_t para2Bytes = pFunc->node.resType.bytes; + if (IS_VAR_DATA_TYPE(para2Type)) { + para2Bytes -= VARSTR_HEADER_SIZE; + } if (para2Bytes <= 0 || para2Bytes > 1000) { // cast dst var type length limits to 1000 - return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "CAST function converted length should be in range [0, 1000]"); } return TSDB_CODE_SUCCESS; } @@ -678,8 +937,8 @@ static int32_t translateTimeTruncate(SFunctionNode* pFunc, char* pErrBuf, int32_ } static int32_t translateTimeDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t paraNum = LIST_LENGTH(pFunc->pParameterList); - if (2 != paraNum && 3 != paraNum) { + int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); + if (2 != numOfParams && 3 != numOfParams) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } @@ -690,7 +949,7 @@ static int32_t translateTimeDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t le } } - if (3 == paraNum) { + if (3 == numOfParams) { if (!IS_INTEGER_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -731,7 +990,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .initFunc = functionSetup, .processFunc = countFunction, .finalizeFunc = functionFinalize, - .invertFunc = countInvertFunction + .invertFunc = countInvertFunction, + .combineFunc = combineFunction, }, { .name = "sum", @@ -743,7 +1003,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .initFunc = functionSetup, .processFunc = sumFunction, .finalizeFunc = functionFinalize, - .invertFunc = sumInvertFunction + .invertFunc = sumInvertFunction, + .combineFunc = sumCombine, }, { 
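/* Editor's note: each builtin is registered as one designated-initializer entry,
 * and this patch threads the new combineFunc through the aggregates, e.g. (shape
 * as in the table that follows):
 *
 *   // { .name = "sum", .type = FUNCTION_TYPE_SUM,
 *   //   .classification = FUNC_MGT_AGG_FUNC,
 *   //   .getEnvFunc = getSumFuncEnv, .initFunc = functionSetup,
 *   //   .processFunc = sumFunction, .finalizeFunc = functionFinalize,
 *   //   .invertFunc = sumInvertFunction, .combineFunc = sumCombine },
 *
 * One hedged observation on translateSubstr() above: the three-parameter branch
 * fetches p2 but range-checks ((SValueNode*)p1)->datum.i, so the length
 * argument's value appears to go unvalidated -- worth double-checking.
 */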
.name = "min", @@ -754,7 +1015,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .getEnvFunc = getMinmaxFuncEnv, .initFunc = minmaxFunctionSetup, .processFunc = minFunction, - .finalizeFunc = minmaxFunctionFinalize + .finalizeFunc = minmaxFunctionFinalize, + .combineFunc = minCombine }, { .name = "max", @@ -765,7 +1027,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .getEnvFunc = getMinmaxFuncEnv, .initFunc = minmaxFunctionSetup, .processFunc = maxFunction, - .finalizeFunc = minmaxFunctionFinalize + .finalizeFunc = minmaxFunctionFinalize, + .combineFunc = maxCombine }, { .name = "stddev", @@ -776,7 +1039,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .initFunc = stddevFunctionSetup, .processFunc = stddevFunction, .finalizeFunc = stddevFinalize, - .invertFunc = stddevInvertFunction + .invertFunc = stddevInvertFunction, + .combineFunc = stddevCombine, }, { .name = "leastsquares", @@ -787,7 +1051,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .initFunc = leastSQRFunctionSetup, .processFunc = leastSQRFunction, .finalizeFunc = leastSQRFinalize, - .invertFunc = leastSQRInvertFunction + .invertFunc = leastSQRInvertFunction, }, { .name = "avg", @@ -798,7 +1062,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .initFunc = avgFunctionSetup, .processFunc = avgFunction, .finalizeFunc = avgFinalize, - .invertFunc = avgInvertFunction + .invertFunc = avgInvertFunction, + .combineFunc = avgCombine, }, { .name = "percentile", @@ -815,15 +1080,15 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .type = FUNCTION_TYPE_APERCENTILE, .classification = FUNC_MGT_AGG_FUNC, .translateFunc = translateApercentile, - .getEnvFunc = getMinmaxFuncEnv, - .initFunc = minmaxFunctionSetup, - .processFunc = maxFunction, - .finalizeFunc = functionFinalize + .getEnvFunc = getApercentileFuncEnv, + .initFunc = apercentileFunctionSetup, + .processFunc = apercentileFunction, + .finalizeFunc = apercentileFinalize }, { .name = "top", .type = FUNCTION_TYPE_TOP, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC, .translateFunc = translateTop, .getEnvFunc = getTopBotFuncEnv, .initFunc = functionSetup, @@ -833,7 +1098,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "bottom", .type = FUNCTION_TYPE_BOTTOM, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC, .translateFunc = translateBottom, .getEnvFunc = getTopBotFuncEnv, .initFunc = functionSetup, @@ -880,7 +1145,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, .processFunc = firstFunction, - .finalizeFunc = functionFinalize + .finalizeFunc = firstLastFinalize, + .combineFunc = firstCombine, }, { .name = "last", @@ -890,7 +1156,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, .processFunc = lastFunction, - .finalizeFunc = lastFinalize + .finalizeFunc = firstLastFinalize, + .combineFunc = lastCombine, + }, + { + .name = "twa", + .type = FUNCTION_TYPE_TWA, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC, + .translateFunc = translateInNumOutDou, + .getEnvFunc = getTwaFuncEnv, + .initFunc = twaFunctionSetup, + .processFunc = twaFunction, + .finalizeFunc = twaFinalize }, { .name = "histogram", @@ -915,7 +1192,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = 
"diff", .type = FUNCTION_TYPE_DIFF, - .classification = FUNC_MGT_NONSTANDARD_SQL_FUNC | FUNC_MGT_TIMELINE_FUNC, + .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC, .translateFunc = translateDiff, .getEnvFunc = getDiffFuncEnv, .initFunc = diffFunctionSetup, @@ -923,9 +1200,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .finalizeFunc = functionFinalize }, { - .name = "state_count", + .name = "statecount", .type = FUNCTION_TYPE_STATE_COUNT, - .classification = FUNC_MGT_NONSTANDARD_SQL_FUNC, + .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC, .translateFunc = translateStateCount, .getEnvFunc = getStateFuncEnv, .initFunc = functionSetup, @@ -933,9 +1210,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .finalizeFunc = NULL }, { - .name = "state_duration", + .name = "stateduration", .type = FUNCTION_TYPE_STATE_DURATION, - .classification = FUNC_MGT_NONSTANDARD_SQL_FUNC | FUNC_MGT_TIMELINE_FUNC, + .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC, .translateFunc = translateStateDuration, .getEnvFunc = getStateFuncEnv, .initFunc = functionSetup, @@ -945,7 +1222,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "csum", .type = FUNCTION_TYPE_CSUM, - .classification = FUNC_MGT_NONSTANDARD_SQL_FUNC | FUNC_MGT_TIMELINE_FUNC, + .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC, .translateFunc = translateCsum, .getEnvFunc = getCsumFuncEnv, .initFunc = functionSetup, @@ -955,7 +1232,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "mavg", .type = FUNCTION_TYPE_MAVG, - .classification = FUNC_MGT_NONSTANDARD_SQL_FUNC | FUNC_MGT_TIMELINE_FUNC, + .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC, .translateFunc = translateMavg, .getEnvFunc = getMavgFuncEnv, .initFunc = mavgFunctionSetup, @@ -965,7 +1242,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "sample", .type = FUNCTION_TYPE_SAMPLE, - .classification = FUNC_MGT_NONSTANDARD_SQL_FUNC | FUNC_MGT_TIMELINE_FUNC, + .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC, .translateFunc = translateSample, .getEnvFunc = getSampleFuncEnv, .initFunc = sampleFunctionSetup, @@ -975,7 +1252,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "tail", .type = FUNCTION_TYPE_TAIL, - .classification = FUNC_MGT_NONSTANDARD_SQL_FUNC | FUNC_MGT_TIMELINE_FUNC, + .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC, .translateFunc = translateTail, .getEnvFunc = getTailFuncEnv, .initFunc = tailFunctionSetup, @@ -985,7 +1262,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "unique", .type = FUNCTION_TYPE_UNIQUE, - .classification = FUNC_MGT_NONSTANDARD_SQL_FUNC | FUNC_MGT_TIMELINE_FUNC, + .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC, .translateFunc = translateUnique, .getEnvFunc = getUniqueFuncEnv, .initFunc = uniqueFunctionSetup, diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index ad92d095d5a292d366f127642e835b3dadda10dd..4cfb3b5252fdb257f1947c86468c963d2510c72e 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -20,6 +20,8 @@ #include "taggfunction.h" #include "tcompare.h" #include "tdatablock.h" +#include "tdigest.h" +#include "thistogram.h" #include "tpercentile.h" #define HISTOGRAM_MAX_BINS_NUM 1000 @@ -95,6 +97,19 @@ typedef struct SPercentileInfo { int64_t numOfElems; } SPercentileInfo; +typedef struct SAPercentileInfo { + 
double result; + int8_t algo; + SHistogramInfo *pHisto; + TDigest *pTDigest; +} SAPercentileInfo; + +typedef enum { + APERCT_ALGO_UNKNOWN = 0, + APERCT_ALGO_DEFAULT, + APERCT_ALGO_TDIGEST, +} EAPerctAlgoType; + typedef struct SDiffInfo { bool hasPrev; bool includeNull; @@ -284,7 +299,7 @@ int32_t functionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); - //pResInfo->isNullRes = (pResInfo->numOfRes == 0) ? 1 : 0; + pResInfo->isNullRes = (pResInfo->numOfRes == 0) ? 1 : 0; char* in = GET_ROWCELL_INTERBUF(pResInfo); colDataAppend(pCol, pBlock->info.rows, in, pResInfo->isNullRes); @@ -292,6 +307,24 @@ int32_t functionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { return pResInfo->numOfRes; } +int32_t firstCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) { + SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx); + char* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo); + int32_t type = pDestCtx->input.pData[0]->info.type; + int32_t bytes = pDestCtx->input.pData[0]->info.bytes; + + SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx); + char* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo); + + if (pSResInfo->numOfRes != 0 && + (pDResInfo->numOfRes == 0 || *(TSKEY*)(pDBuf + bytes) > *(TSKEY*)(pSBuf + bytes)) ) { + memcpy(pDBuf, pSBuf, bytes); + *(TSKEY*)(pDBuf + bytes) = *(TSKEY*)(pSBuf + bytes); + pDResInfo->numOfRes = 1; + } + return TSDB_CODE_SUCCESS; +} + int32_t dummyProcess(SqlFunctionCtx* UNUSED_PARAM(pCtx)) { return 0; } @@ -388,6 +421,18 @@ int32_t countInvertFunction(SqlFunctionCtx* pCtx) { return TSDB_CODE_SUCCESS; } +int32_t combineFunction(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) { + SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx); + char* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo); + + SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx); + char* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo); + *((int64_t*)pDBuf) += *((int64_t*)pSBuf); + + SET_VAL(pDResInfo, *((int64_t*)pDBuf), 1); + return TSDB_CODE_SUCCESS; +} + #define LIST_ADD_N(_res, _col, _start, _rows, _t, numOfElem) \ do { \ _t* d = (_t*)(_col->pData); \ @@ -472,6 +517,11 @@ int32_t sumFunction(SqlFunctionCtx* pCtx) { } } + //check for overflow + if (IS_FLOAT_TYPE(type) && (isinf(pSumRes->dsum) || isnan(pSumRes->dsum))) { + GET_RES_INFO(pCtx)->isNullRes = 1; + } + _sum_over: // data in the check operation are all null, not output SET_VAL(GET_RES_INFO(pCtx), numOfElem, 1); @@ -537,6 +587,26 @@ int32_t sumInvertFunction(SqlFunctionCtx* pCtx) { return TSDB_CODE_SUCCESS; } +int32_t sumCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) { + SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx); + SSumRes* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo); + int32_t type = pDestCtx->input.pData[0]->info.type; + + SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx); + SSumRes* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo); + + if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) { + pDBuf->isum += pSBuf->isum; + } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { + pDBuf->usum += pSBuf->usum; + } else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) { + pDBuf->dsum += pSBuf->dsum; + } + + SET_VAL(pDResInfo, *((int64_t*)pDBuf), 1); + return TSDB_CODE_SUCCESS; +} + bool getSumFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) { pEnv->calcMemSize = sizeof(SSumRes); return true; @@ -738,16 +808,41 @@ int32_t avgInvertFunction(SqlFunctionCtx* pCtx) { 
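/* Editor's note: sumFunction() above now maps a float sum that overflowed to a
 * NULL result (isNullRes = 1) rather than returning +/-inf or NaN to the client,
 * and avgFinalize() below applies the same guard to the final quotient. A sketch
 * of the check:
 *
 *   #include <math.h>
 *
 *   static int overflowed(double v) {
 *     return isinf(v) || isnan(v);   // no representable result
 *   }
 */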
return TSDB_CODE_SUCCESS; } +int32_t avgCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) { + SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx); + SAvgRes* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo); + int32_t type = pDestCtx->input.pData[0]->info.type; + + SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx); + SAvgRes* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo); + + if (IS_INTEGER_TYPE(type)) { + pDBuf->sum.isum += pSBuf->sum.isum; + } else { + pDBuf->sum.dsum += pSBuf->sum.dsum; + } + pDBuf->count += pSBuf->count; + + return TSDB_CODE_SUCCESS; +} + int32_t avgFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SInputColumnInfoData* pInput = &pCtx->input; - int32_t type = pInput->pData[0]->info.type; - SAvgRes* pAvgRes = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + + int32_t type = pInput->pData[0]->info.type; + SAvgRes* pAvgRes = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + if (IS_INTEGER_TYPE(type)) { pAvgRes->result = pAvgRes->sum.isum / ((double)pAvgRes->count); } else { pAvgRes->result = pAvgRes->sum.dsum / ((double)pAvgRes->count); } + //check for overflow + if (isinf(pAvgRes->result) || isnan(pAvgRes->result)) { + GET_RES_INFO(pCtx)->isNullRes = 1; + } + return functionFinalize(pCtx, pBlock); } @@ -1273,6 +1368,34 @@ void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuple } } +int32_t minMaxCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx, int32_t isMinFunc) { + SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx); + SMinmaxResInfo* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo); + int32_t type = pDestCtx->input.pData[0]->info.type; + + SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx); + SMinmaxResInfo* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo); + if (IS_FLOAT_TYPE(type)) { + if (pSBuf->assign && + ( (((*(double*)&pDBuf->v) < (*(double*)&pSBuf->v)) ^ isMinFunc) || !pDBuf->assign ) ) { + *(double*) &pDBuf->v = *(double*) &pSBuf->v; + } + } else { + if ( pSBuf->assign && ( ((pDBuf->v < pSBuf->v) ^ isMinFunc) || !pDBuf->assign ) ) { + pDBuf->v = pSBuf->v; + } + } + SET_VAL(pDResInfo, *((int64_t*)pDBuf), 1); + return TSDB_CODE_SUCCESS; +} + +int32_t minCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) { + return minMaxCombine(pDestCtx, pSourceCtx, 1); +} +int32_t maxCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) { + return minMaxCombine(pDestCtx, pSourceCtx, 0); +} + bool getStddevFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) { pEnv->calcMemSize = sizeof(SStddevRes); return true; @@ -1491,6 +1614,25 @@ int32_t stddevFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { return functionFinalize(pCtx, pBlock); } +int32_t stddevCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) { + SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx); + SStddevRes* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo); + int32_t type = pDestCtx->input.pData[0]->info.type; + + SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx); + SStddevRes* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo); + + if (IS_INTEGER_TYPE(type)) { + pDBuf->isum += pSBuf->isum; + pDBuf->quadraticISum += pSBuf->quadraticISum; + } else { + pDBuf->dsum += pSBuf->dsum; + pDBuf->quadraticDSum += pSBuf->quadraticDSum; + } + pDBuf->count += pSBuf->count; + return TSDB_CODE_SUCCESS; +} + bool getLeastSQRFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) { pEnv->calcMemSize = sizeof(SLeastSQRInfo); return true; @@ -1686,7 +1828,7 @@ bool percentileFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultI } int32_t percentileFunction(SqlFunctionCtx* 
pCtx) { - int32_t notNullElems = 0; + int32_t numOfElems = 0; SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); SInputColumnInfoData* pInput = &pCtx->input; @@ -1764,11 +1906,11 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) { } char* data = colDataGetData(pCol, i); - notNullElems += 1; + numOfElems += 1; tMemBucketPut(pInfo->pMemBucket, data, 1); } - SET_VAL(pResInfo, notNullElems, 1); + SET_VAL(pResInfo, numOfElems, 1); } return TSDB_CODE_SUCCESS; @@ -1790,6 +1932,131 @@ int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { return functionFinalize(pCtx, pBlock); } +bool getApercentileFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) { + int32_t bytesHist = (int32_t)(sizeof(SAPercentileInfo) + sizeof(SHistogramInfo) + sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1)); + int32_t bytesDigest = (int32_t)(sizeof(SAPercentileInfo) + TDIGEST_SIZE(COMPRESSION)); + pEnv->calcMemSize = TMAX(bytesHist, bytesDigest); + return true; +} + +static int8_t getApercentileAlgo(char *algoStr) { + int8_t algoType; + if (strcasecmp(algoStr, "default") == 0) { + algoType = APERCT_ALGO_DEFAULT; + } else if (strcasecmp(algoStr, "t-digest") == 0) { + algoType = APERCT_ALGO_TDIGEST; + } else { + algoType = APERCT_ALGO_UNKNOWN; + } + + return algoType; +} + +static void buildHistogramInfo(SAPercentileInfo* pInfo) { + pInfo->pHisto = (SHistogramInfo*) ((char*) pInfo + sizeof(SAPercentileInfo)); + pInfo->pHisto->elems = (SHistBin*) ((char*)pInfo->pHisto + sizeof(SHistogramInfo)); +} + +bool apercentileFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo) { + if (!functionSetup(pCtx, pResultInfo)) { + return false; + } + + SAPercentileInfo* pInfo = GET_ROWCELL_INTERBUF(pResultInfo); + if (pCtx->numOfParams == 2) { + pInfo->algo = APERCT_ALGO_DEFAULT; + } else if (pCtx->numOfParams == 3) { + pInfo->algo = getApercentileAlgo(varDataVal(pCtx->param[2].param.pz)); + if (pInfo->algo == APERCT_ALGO_UNKNOWN) { + return false; + } + } + + char *tmp = (char *)pInfo + sizeof(SAPercentileInfo); + if (pInfo->algo == APERCT_ALGO_TDIGEST) { + pInfo->pTDigest = tdigestNewFrom(tmp, COMPRESSION); + } else { + buildHistogramInfo(pInfo); + pInfo->pHisto = tHistogramCreateFrom(tmp, MAX_HISTOGRAM_BIN); + } + + return true; +} + +int32_t apercentileFunction(SqlFunctionCtx* pCtx) { + int32_t numOfElems = 0; + SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); + + SInputColumnInfoData* pInput = &pCtx->input; + //SColumnDataAgg* pAgg = pInput->pColumnDataAgg[0]; + + SColumnInfoData* pCol = pInput->pData[0]; + int32_t type = pCol->info.type; + + SAPercentileInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo); + + int32_t start = pInput->startRowIndex; + if (pInfo->algo == APERCT_ALGO_TDIGEST) { + for (int32_t i = start; i < pInput->numOfRows + start; ++i) { + if (colDataIsNull_f(pCol->nullbitmap, i)) { + continue; + } + numOfElems += 1; + char* data = colDataGetData(pCol, i); + + double v = 0; // value + int64_t w = 1; // weight + GET_TYPED_DATA(v, double, type, data); + tdigestAdd(pInfo->pTDigest, v, w); + } + } else { + for (int32_t i = start; i < pInput->numOfRows + start; ++i) { + if (colDataIsNull_f(pCol->nullbitmap, i)) { + continue; + } + numOfElems += 1; + char* data = colDataGetData(pCol, i); + + double v = 0; + GET_TYPED_DATA(v, double, type, data); + tHistogramAdd(&pInfo->pHisto, v); + } + } + + SET_VAL(pResInfo, numOfElems, 1); + return TSDB_CODE_SUCCESS; +} + +int32_t apercentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { + SVariant* pVal = &pCtx->param[1].param; + double percent =
(pVal->nType == TSDB_DATA_TYPE_BIGINT) ? pVal->i : pVal->d; + + SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); + SAPercentileInfo* pInfo = (SAPercentileInfo*)GET_ROWCELL_INTERBUF(pResInfo); + + if (pInfo->algo == APERCT_ALGO_TDIGEST) { + if (pInfo->pTDigest->size > 0) { + pInfo->result = tdigestQuantile(pInfo->pTDigest, percent/100); + } else { // no need to free + //setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); + return TSDB_CODE_SUCCESS; + } + } else { + if (pInfo->pHisto->numOfElems > 0) { + double ratio[] = {percent}; + double *res = tHistogramUniform(pInfo->pHisto, ratio, 1); + pInfo->result = *res; + //memcpy(pCtx->pOutput, res, sizeof(double)); + taosMemoryFree(res); + } else { // no need to free + //setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); + return TSDB_CODE_SUCCESS; + } + } + + return functionFinalize(pCtx, pBlock); +} + bool getFirstLastFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) { SColumnNode* pNode = nodesListGetNode(pFunc->pParameterList, 0); pEnv->calcMemSize = pNode->node.resType.bytes + sizeof(int64_t); @@ -1802,8 +2069,6 @@ bool getSelectivityFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) { return true; } - - static FORCE_INLINE TSKEY getRowPTs(SColumnInfoData* pTsColInfo, int32_t rowIndex) { if (pTsColInfo == NULL) { return 0; @@ -1966,7 +2231,7 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) { return TSDB_CODE_SUCCESS; } -int32_t lastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { +int32_t firstLastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { int32_t slotId = pCtx->pExpr->base.resSchema.slotId; SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); @@ -1979,6 +2244,24 @@ int32_t lastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { return pResInfo->numOfRes; } +int32_t lastCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) { + SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx); + char* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo); + int32_t type = pDestCtx->input.pData[0]->info.type; + int32_t bytes = pDestCtx->input.pData[0]->info.bytes; + + SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx); + char* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo); + + if (pSResInfo->numOfRes != 0 && + (pDResInfo->numOfRes == 0 || *(TSKEY*)(pDBuf + bytes) < *(TSKEY*)(pSBuf + bytes)) ) { + memcpy(pDBuf, pSBuf, bytes); + *(TSKEY*)(pDBuf + bytes) = *(TSKEY*)(pSBuf + bytes); + pDResInfo->numOfRes = 1; + } + return TSDB_CODE_SUCCESS; +} + bool getDiffFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) { pEnv->calcMemSize = sizeof(SDiffInfo); return true; @@ -2019,15 +2302,15 @@ static void doSetPrevVal(SDiffInfo* pDiffInfo, int32_t type, const char* pv) { } static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SColumnInfoData* pOutput, int32_t pos, int32_t order) { - int32_t factor = (order == TSDB_ORDER_ASC)? 1:-1; + int32_t factor = (order == TSDB_ORDER_ASC)? 
1:-1; switch (type) { case TSDB_DATA_TYPE_INT: { int32_t v = *(int32_t*)pv; - int32_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null + int64_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null if (delta < 0 && pDiffInfo->ignoreNegative) { colDataSetNull_f(pOutput->nullbitmap, pos); } else { - colDataAppendInt32(pOutput, pos, &delta); + colDataAppendInt64(pOutput, pos, &delta); } pDiffInfo->prev.i64 = v; break; @@ -2035,22 +2318,22 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_TINYINT: { int8_t v = *(int8_t*)pv; - int8_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null + int64_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null if (delta < 0 && pDiffInfo->ignoreNegative) { colDataSetNull_f(pOutput->nullbitmap, pos); } else { - colDataAppendInt8(pOutput, pos, &delta); + colDataAppendInt64(pOutput, pos, &delta); } pDiffInfo->prev.i64 = v; break; } case TSDB_DATA_TYPE_SMALLINT: { int16_t v = *(int16_t*)pv; - int16_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null + int64_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null if (delta < 0 && pDiffInfo->ignoreNegative) { colDataSetNull_f(pOutput->nullbitmap, pos); } else { - colDataAppendInt16(pOutput, pos, &delta); + colDataAppendInt64(pOutput, pos, &delta); } pDiffInfo->prev.i64 = v; break; @@ -2068,11 +2351,11 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo } case TSDB_DATA_TYPE_FLOAT: { float v = *(float*)pv; - float delta = factor*(v - pDiffInfo->prev.d64); // direct previous may be null - if (delta < 0 && pDiffInfo->ignoreNegative) { + double delta = factor*(v - pDiffInfo->prev.d64); // direct previous may be null + if ((delta < 0 && pDiffInfo->ignoreNegative) || isinf(delta) || isnan(delta)) { //check for overflow colDataSetNull_f(pOutput->nullbitmap, pos); } else { - colDataAppendFloat(pOutput, pos, &delta); + colDataAppendDouble(pOutput, pos, &delta); } pDiffInfo->prev.d64 = v; break; @@ -2080,7 +2363,7 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo case TSDB_DATA_TYPE_DOUBLE: { double v = *(double*)pv; double delta = factor*(v - pDiffInfo->prev.d64); // direct previous may be null - if (delta < 0 && pDiffInfo->ignoreNegative) { + if ((delta < 0 && pDiffInfo->ignoreNegative) || isinf(delta) || isnan(delta)) { //check for overflow colDataSetNull_f(pOutput->nullbitmap, pos); } else { colDataAppendDouble(pOutput, pos, &delta); @@ -3250,7 +3533,12 @@ int32_t csumFunction(SqlFunctionCtx* pCtx) { double v; GET_TYPED_DATA(v, double, type, data); pSumRes->dsum += v; - colDataAppend(pOutput, pos, (char *)&pSumRes->dsum, false); + //check for overflow + if (isinf(pSumRes->dsum) || isnan(pSumRes->dsum)) { + colDataAppendNULL(pOutput, pos); + } else { + colDataAppend(pOutput, pos, (char *)&pSumRes->dsum, false); + } } //TODO: remove this after pTsOutput is handled @@ -3324,7 +3612,12 @@ int32_t mavgFunction(SqlFunctionCtx* pCtx) { pInfo->points[pInfo->pos] = v; double result = pInfo->sum / pInfo->numOfPoints; - colDataAppend(pOutput, pos, (char *)&result, false); + //check for overflow + if (isinf(result) || isnan(result)) { + colDataAppendNULL(pOutput, pos); + } else { + colDataAppend(pOutput, pos, (char *)&result, false); + } //TODO: remove this after pTsOutput is handled if (pTsOutput != NULL) { @@ -3398,7 +3691,6 @@ int32_t 
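The doHandleDiff() changes above widen every integer delta to int64_t (and float deltas to double) before appending, instead of keeping the input column's own width. The motivation is plain overflow arithmetic: for an INT column, v - prev can need 33 bits even though both operands fit in 32. A hedged, self-contained illustration of the failure mode the patch avoids (not TDengine code; the subtraction is done in 64-bit first so the narrowing itself stays well defined):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int32_t prev = INT32_MIN;  /* -2147483648 */
  int32_t v    = INT32_MAX;  /*  2147483647 */

  /* True difference: 4294967295, which does not fit in int32_t. */
  int64_t wide = (int64_t)v - (int64_t)prev;

  /* What a 32-bit delta column would have stored: the truncated value,
   * -1 on the usual two's-complement targets. */
  int32_t narrow = (int32_t)wide;

  printf("wide=%lld narrow=%d\n", (long long)wide, narrow);
  return 0;
}

The float case follows the same reasoning: the patch stores float deltas as double and turns inf/nan results into NULL rather than appending them.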
sampleFunction(SqlFunctionCtx* pCtx) { TSKEY* tsList = (int64_t*)pInput->pPTS->pData; SColumnInfoData* pInputCol = pInput->pData[0]; - SColumnInfoData* pTsOutput = pCtx->pTsOutput; SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput; int32_t startOffset = pCtx->offset; @@ -3421,24 +3713,6 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) { return pInfo->numSampled; } -//int32_t sampleFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { -// SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); -// SSampleInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); -// int32_t slotId = pCtx->pExpr->base.resSchema.slotId; -// SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); -// -// //int32_t currentRow = pBlock->info.rows; -// pResInfo->numOfRes = pInfo->numSampled; -// -// for (int32_t i = 0; i < pInfo->numSampled; ++i) { -// colDataAppend(pCol, i, pInfo->data + i * pInfo->colBytes, false); -// //TODO: handle ts output -// } -// -// return pResInfo->numOfRes; -//} - - bool getTailFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) { SColumnNode* pCol = (SColumnNode*)nodesListGetNode(pFunc->pParameterList, 0); SValueNode* pVal = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1); @@ -3484,6 +3758,7 @@ static void tailAssignResult(STailItem* pItem, char *data, int32_t colBytes, TSK if (isNull) { pItem->isNull = true; } else { + pItem->isNull = false; memcpy(pItem->data, data, colBytes); } } @@ -3514,7 +3789,6 @@ int32_t tailFunction(SqlFunctionCtx* pCtx) { TSKEY* tsList = (int64_t*)pInput->pPTS->pData; SColumnInfoData* pInputCol = pInput->pData[0]; - SColumnInfoData* pTsOutput = pCtx->pTsOutput; SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput; int32_t startOffset = pCtx->offset; @@ -3590,8 +3864,21 @@ bool uniqueFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) { } static void doUniqueAdd(SUniqueInfo* pInfo, char *data, TSKEY ts, bool isNull) { - int32_t hashKeyBytes = IS_VAR_DATA_TYPE(pInfo->colType) ? varDataTLen(data) : pInfo->colBytes; + //handle null elements + if (isNull == true) { + int32_t size = sizeof(SUniqueItem) + pInfo->colBytes; + SUniqueItem *pItem = (SUniqueItem *)(pInfo->pItems + pInfo->numOfPoints * size); + if (pItem->isNull == false) { + pItem->timestamp = ts; + pItem->isNull = true; + pInfo->numOfPoints++; + } else if (pItem->timestamp > ts && pItem->isNull == true) { + pItem->timestamp = ts; + } + return; + } + int32_t hashKeyBytes = IS_VAR_DATA_TYPE(pInfo->colType) ? 
varDataTLen(data) : pInfo->colBytes; SUniqueItem *pHashItem = taosHashGet(pInfo->pHash, data, hashKeyBytes); if (pHashItem == NULL) { int32_t size = sizeof(SUniqueItem) + pInfo->colBytes; @@ -3604,7 +3891,6 @@ static void doUniqueAdd(SUniqueInfo* pInfo, char *data, TSKEY ts, bool isNull) { } else if (pHashItem->timestamp > ts) { pHashItem->timestamp = ts; } - } int32_t uniqueFunction(SqlFunctionCtx* pCtx) { @@ -3631,7 +3917,11 @@ int32_t uniqueFunction(SqlFunctionCtx* pCtx) { for (int32_t i = 0; i < pInfo->numOfPoints; ++i) { SUniqueItem *pItem = (SUniqueItem *)(pInfo->pItems + i * (sizeof(SUniqueItem) + pInfo->colBytes)); - colDataAppend(pOutput, i, pItem->data, false); + if (pItem->isNull == true) { + colDataAppendNULL(pOutput, i); + } else { + colDataAppend(pOutput, i, pItem->data, false); + } if (pTsOutput != NULL) { colDataAppendInt64(pTsOutput, i, &pItem->timestamp); } @@ -3642,7 +3932,7 @@ int32_t uniqueFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); - SUniqueInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + SUniqueInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo); int32_t slotId = pCtx->pExpr->base.resSchema.slotId; SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); @@ -3655,3 +3945,260 @@ int32_t uniqueFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { return pResInfo->numOfRes; } +typedef struct STwaInfo { + double dOutput; + SPoint1 p; + STimeWindow win; +} STwaInfo; + +bool getTwaFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv) { + pEnv->calcMemSize = sizeof(STwaInfo); + return true; +} + +bool twaFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo) { + if (!functionSetup(pCtx, pResultInfo)) { + return false; + } + + STwaInfo *pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + pInfo->p.key = INT64_MIN; + pInfo->win = TSWINDOW_INITIALIZER; + return true; +} + +static double twa_get_area(SPoint1 s, SPoint1 e) { + if ((s.val >= 0 && e.val >= 0) || (s.val <= 0 && e.val <= 0)) { + return (s.val + e.val) * (e.key - s.key) / 2; + } + + double x = (s.key * e.val - e.key * s.val)/(e.val - s.val); + double val = (s.val * (x - s.key) + e.val * (e.key - x)) / 2; + return val; +} + +#define INIT_INTP_POINT(_p, _k, _v) \ + do { \ + (_p).key = (_k); \ + (_p).val = (_v); \ + } while (0) + +int32_t twaFunction(SqlFunctionCtx* pCtx) { + SInputColumnInfoData* pInput = &pCtx->input; + SColumnInfoData* pInputCol = pInput->pData[0]; + + TSKEY* tsList = (int64_t*)pInput->pPTS->pData; + + SResultRowEntryInfo *pResInfo = GET_RES_INFO(pCtx); + + STwaInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); + SPoint1 *last = &pInfo->p; + int32_t numOfElems = 0; + + int32_t i = pInput->startRowIndex; + if (pCtx->start.key != INT64_MIN) { + ASSERT((pCtx->start.key < tsList[i] && pCtx->order == TSDB_ORDER_ASC) || + (pCtx->start.key > tsList[i] && pCtx->order == TSDB_ORDER_DESC)); + + ASSERT(last->key == INT64_MIN); + last->key = tsList[i]; + + GET_TYPED_DATA(last->val, double, pInputCol->info.type, colDataGetData(pInputCol, i)); + + pInfo->dOutput += twa_get_area(pCtx->start, *last); + pInfo->win.skey = pCtx->start.key; + numOfElems++; + i += 1; + } else if (pInfo->p.key == INT64_MIN) { + last->key = tsList[i]; + GET_TYPED_DATA(last->val, double, pInputCol->info.type, colDataGetData(pInputCol, i)); + + pInfo->win.skey = last->key; + numOfElems++; + i += 1; + } + + SPoint1 st = {0}; + + // accumulate the time-weighted area for each supported input type + switch(pInputCol->info.type) { + case
TSDB_DATA_TYPE_TINYINT: { + int8_t *val = (int8_t*) colDataGetData(pInputCol, 0); + for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + continue; + } + + INIT_INTP_POINT(st, tsList[i], val[i]); + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + + case TSDB_DATA_TYPE_SMALLINT: { + int16_t *val = (int16_t*) colDataGetData(pInputCol, 0); + for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + continue; + } + + INIT_INTP_POINT(st, tsList[i], val[i]); + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + case TSDB_DATA_TYPE_INT: { + int32_t *val = (int32_t*) colDataGetData(pInputCol, 0); + for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + continue; + } + + INIT_INTP_POINT(st, tsList[i], val[i]); + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + case TSDB_DATA_TYPE_BIGINT: { + int64_t *val = (int64_t*) colDataGetData(pInputCol, 0); + for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + continue; + } + + INIT_INTP_POINT(st, tsList[i], val[i]); + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + case TSDB_DATA_TYPE_FLOAT: { + float *val = (float*) colDataGetData(pInputCol, 0); + for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + continue; + } + + INIT_INTP_POINT(st, tsList[i], val[i]); + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + double *val = (double*) colDataGetData(pInputCol, 0); + for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + continue; + } + + INIT_INTP_POINT(st, tsList[i], val[i]); + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + case TSDB_DATA_TYPE_UTINYINT: { + uint8_t *val = (uint8_t*) colDataGetData(pInputCol, 0); + for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + continue; + } + + INIT_INTP_POINT(st, tsList[i], val[i]); + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + case TSDB_DATA_TYPE_USMALLINT: { + uint16_t *val = (uint16_t*) colDataGetData(pInputCol, 0); + for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + continue; + } + + INIT_INTP_POINT(st, tsList[i], val[i]); + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + case TSDB_DATA_TYPE_UINT: { + uint32_t *val = (uint32_t*) colDataGetData(pInputCol, 0); + for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + continue; + } + + INIT_INTP_POINT(st, tsList[i], val[i]); + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + case TSDB_DATA_TYPE_UBIGINT: { + uint64_t *val = (uint64_t*) colDataGetData(pInputCol, 0); + for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + continue; + } + + INIT_INTP_POINT(st, tsList[i], val[i]); + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + + default: ASSERT(0); + } + + // the last interpolated time window 
value + if (pCtx->end.key != INT64_MIN) { + pInfo->dOutput += twa_get_area(pInfo->p, pCtx->end); + pInfo->p = pCtx->end; + } + + pInfo->win.ekey = pInfo->p.key; + + SET_VAL(pResInfo, numOfElems, 1); + return TSDB_CODE_SUCCESS; +} + +/* + * Copy the input to interResBuf to avoid the input buffer space being overwritten + * by the next input data. The TWA function only applies to each table, so no merge procedure + * is required; we simply copy the result to interResBuffer. + */ +//void twa_function_copy(SQLFunctionCtx *pCtx) { +// assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY); +// SResultRowEntryInfo *pResInfo = GET_RES_INFO(pCtx); +// +// memcpy(GET_ROWCELL_INTERBUF(pResInfo), pCtx->pInput, (size_t)pCtx->inputBytes); +// pResInfo->hasResult = ((STwaInfo *)pCtx->pInput)->hasResult; +//} + +int32_t twaFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock) { + SResultRowEntryInfo *pResInfo = GET_RES_INFO(pCtx); + + STwaInfo *pInfo = (STwaInfo *)GET_ROWCELL_INTERBUF(pResInfo); + if (pResInfo->numOfRes == 0) { + pResInfo->isNullRes = 1; + } else { + // assert(pInfo->win.ekey == pInfo->p.key && pInfo->hasResult == pResInfo->hasResult); + if (pInfo->win.ekey == pInfo->win.skey) { + pInfo->dOutput = pInfo->p.val; + } else { + pInfo->dOutput = pInfo->dOutput / (pInfo->win.ekey - pInfo->win.skey); + } + + pResInfo->numOfRes = 1; + } + + return functionFinalize(pCtx, pBlock); +} + diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c index 3b1e66f2ad5a4818e8d8b3e502b14a825d66c8e8..c2b325bc928be50ac908c103bb6a14a907156b39 100644 --- a/source/libs/function/src/functionMgt.c +++ b/source/libs/function/src/functionMgt.c @@ -16,7 +16,6 @@ #include "functionMgt.h" #include "builtins.h" -#include "catalog.h" #include "functionMgtInt.h" #include "taos.h" #include "taoserror.h" @@ -65,39 +64,19 @@ static bool isSpecificClassifyFunc(int32_t funcId, uint64_t classification) { return FUNC_MGT_TEST_MASK(funcMgtBuiltins[funcId].classification, classification); } -static int32_t getUdfInfo(SFmGetFuncInfoParam* pParam, SFunctionNode* pFunc) { - SFuncInfo* pInfo = NULL; - int32_t code = catalogGetUdfInfo(pParam->pCtg, pParam->pRpc, pParam->pMgmtEps, pFunc->functionName, &pInfo); - if (TSDB_CODE_SUCCESS != code) { - return code; - } - if (NULL == pInfo) { - snprintf(pParam->pErrBuf, pParam->errBufLen, "Invalid function name: %s", pFunc->functionName); - return TSDB_CODE_FUNC_INVALID_FUNTION; - } - pFunc->funcType = FUNCTION_TYPE_UDF; - pFunc->funcId = TSDB_FUNC_TYPE_AGGREGATE == pInfo->funcType ?
FUNC_AGGREGATE_UDF_ID : FUNC_SCALAR_UDF_ID; - pFunc->node.resType.type = pInfo->outputType; - pFunc->node.resType.bytes = pInfo->outputLen; - pFunc->udfBufSize = pInfo->bufSize; - tFreeSFuncInfo(pInfo); - taosMemoryFree(pInfo); - return TSDB_CODE_SUCCESS; -} - int32_t fmFuncMgtInit() { taosThreadOnce(&functionHashTableInit, doInitFunctionTable); return initFunctionCode; } -int32_t fmGetFuncInfo(SFmGetFuncInfoParam* pParam, SFunctionNode* pFunc) { +int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen) { void* pVal = taosHashGet(gFunMgtService.pFuncNameHashTable, pFunc->functionName, strlen(pFunc->functionName)); if (NULL != pVal) { pFunc->funcId = *(int32_t*)pVal; pFunc->funcType = funcMgtBuiltins[pFunc->funcId].type; - return funcMgtBuiltins[pFunc->funcId].translateFunc(pFunc, pParam->pErrBuf, pParam->errBufLen); + return funcMgtBuiltins[pFunc->funcId].translateFunc(pFunc, pMsg, msgLen); } - return getUdfInfo(pParam, pFunc); + return TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION; } bool fmIsBuiltinFunc(const char* pFunc) { @@ -122,6 +101,7 @@ int32_t fmGetFuncExecFuncs(int32_t funcId, SFuncExecFuncs* pFpSet) { pFpSet->init = funcMgtBuiltins[funcId].initFunc; pFpSet->process = funcMgtBuiltins[funcId].processFunc; pFpSet->finalize = funcMgtBuiltins[funcId].finalizeFunc; + pFpSet->combine = funcMgtBuiltins[funcId].combineFunc; return TSDB_CODE_SUCCESS; } @@ -149,6 +129,8 @@ bool fmIsAggFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MG bool fmIsScalarFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_SCALAR_FUNC); } +bool fmIsVectorFunc(int32_t funcId) { return !fmIsScalarFunc(funcId); } + bool fmIsSelectFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_SELECT_FUNC); } bool fmIsTimelineFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_TIMELINE_FUNC); } @@ -161,7 +143,7 @@ bool fmIsWindowPseudoColumnFunc(int32_t funcId) { return isSpecificClassifyFunc( bool fmIsWindowClauseFunc(int32_t funcId) { return fmIsAggFunc(funcId) || fmIsWindowPseudoColumnFunc(funcId); } -bool fmIsNonstandardSQLFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_NONSTANDARD_SQL_FUNC); } +bool fmIsIndefiniteRowsFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_INDEFINITE_ROWS_FUNC); } bool fmIsSpecialDataRequiredFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_SPECIAL_DATA_REQUIRED); diff --git a/source/libs/function/src/taggfunction.c b/source/libs/function/src/taggfunction.c index 950655e480b2b3413f26bc56d4771461b0dc4277..e683a38cbd1fd97ac7ba081a65f2af8ac18b8fee 100644 --- a/source/libs/function/src/taggfunction.c +++ b/source/libs/function/src/taggfunction.c @@ -236,7 +236,7 @@ bool isRowEntryCompleted(struct SResultRowEntryInfo* pEntry) { bool isRowEntryInitialized(struct SResultRowEntryInfo* pEntry) { return pEntry->initialized; } - +#if 0 int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, SResultDataInfo* pInfo, int16_t extLength, bool isSuperTable/*, SUdfInfo* pUdfInfo*/) { if (!isValidDataType(dataType)) { @@ -470,6 +470,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI return TSDB_CODE_SUCCESS; } +#endif static bool function_setup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo) { if (pResultInfo->initialized) { diff --git a/source/libs/function/src/texpr.c b/source/libs/function/src/texpr.c index b91af2d1577fc994ccaa6b11b8e9044ffb88b594..703b19ced7e1abeee312a414aafe6b34b936c271 
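With the functionMgt.c hunk above, fmGetFuncInfo() no longer consults the catalog: a hit in the builtin name hash fills funcId/funcType and delegates to the translate callback, while a miss simply returns TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION and leaves UDF resolution to the caller. A minimal sketch of that two-stage dispatch, with a hypothetical registry (BuiltinDef, kBuiltins, lookupBuiltin) standing in for funcMgtBuiltins and its hash table:

#include <stdio.h>
#include <string.h>

enum { NOT_BUILTIN = -1 };  /* stands in for TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION */

typedef struct {
  const char *name;
  int         funcId;
} BuiltinDef;

/* Hypothetical registry; the real table is funcMgtBuiltins, indexed
 * through a name hash built once by fmFuncMgtInit(). */
static const BuiltinDef kBuiltins[] = {
  {"statecount", 1}, {"stateduration", 2}, {"csum", 3},
};

/* Return the builtin id, or NOT_BUILTIN so the caller can fall back to
 * user-defined-function resolution, as the patched fmGetFuncInfo does. */
static int lookupBuiltin(const char *name) {
  for (size_t i = 0; i < sizeof(kBuiltins) / sizeof(kBuiltins[0]); ++i) {
    if (strcmp(kBuiltins[i].name, name) == 0) {
      return kBuiltins[i].funcId;
    }
  }
  return NOT_BUILTIN;
}

int main(void) {
  printf("csum  -> %d\n", lookupBuiltin("csum"));   /* 3 */
  printf("myudf -> %d\n", lookupBuiltin("myudf"));  /* -1: try the UDF path */
  return 0;
}

Splitting the lookup this way keeps functionMgt free of any catalog dependency, which is exactly why the catalog.h include could be dropped.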
100644 --- a/source/libs/function/src/texpr.c +++ b/source/libs/function/src/texpr.c @@ -36,12 +36,7 @@ void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *)) { if (pNode->nodeType == TEXPR_BINARYEXPR_NODE || pNode->nodeType == TEXPR_UNARYEXPR_NODE) { doExprTreeDestroy(&pNode, fp); - } else if (pNode->nodeType == TEXPR_VALUE_NODE) { - taosVariantDestroy(pNode->pVal); - } else if (pNode->nodeType == TEXPR_COL_NODE) { - taosMemoryFreeClear(pNode->pSchema); } - taosMemoryFree(pNode); } @@ -49,15 +44,6 @@ static void doExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *)) { if (*pExpr == NULL) { return; } - - int32_t type = (*pExpr)->nodeType; - if (type == TEXPR_VALUE_NODE) { - taosVariantDestroy((*pExpr)->pVal); - taosMemoryFree((*pExpr)->pVal); - } else if (type == TEXPR_COL_NODE) { - taosMemoryFree((*pExpr)->pSchema); - } - taosMemoryFree(*pExpr); *pExpr = NULL; } diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c index dd57024624f09d807996309c34e3990a39fdab46..90d0640f403ef3be0949f52d1313b715b225ced0 100644 --- a/source/libs/function/src/tpercentile.c +++ b/source/libs/function/src/tpercentile.c @@ -255,7 +255,7 @@ tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval, resetSlotInfo(pBucket); - int32_t ret = createDiskbasedBuf(&pBucket->pBuffer, pBucket->bufPageSize, pBucket->bufPageSize * 512, "1", "/tmp"); + int32_t ret = createDiskbasedBuf(&pBucket->pBuffer, pBucket->bufPageSize, pBucket->bufPageSize * 512, "1", TD_TMP_DIR_PATH); if (ret != 0) { tMemBucketDestroy(pBucket); return NULL; diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c index 5f20d2e50a50fd0cef4e3b9cbaa21d22fb930464..441648e52b2ef78326d73d1944bcfbfd0009abc6 100644 --- a/source/libs/function/src/tudf.c +++ b/source/libs/function/src/tudf.c @@ -24,7 +24,6 @@ #include "builtinsimpl.h" #include "functionMgt.h" -//TODO: add unit test typedef struct SUdfdData { bool startCalled; bool needCleanUp; @@ -45,7 +44,15 @@ typedef struct SUdfdData { SUdfdData udfdGlobal = {0}; +int32_t udfStartUdfd(int32_t startDnodeId); +int32_t udfStopUdfd(); + static int32_t udfSpawnUdfd(SUdfdData *pData); +void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int termSignal); +static int32_t udfSpawnUdfd(SUdfdData* pData); +static void udfUdfdCloseWalkCb(uv_handle_t* handle, void* arg); +static void udfUdfdStopAsyncCb(uv_async_t *async); +static void udfWatchUdfd(void *args); void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int termSignal) { fnInfo("udfd process exited with status %" PRId64 ", signal %d", exitStatus, termSignal); @@ -65,12 +72,20 @@ static int32_t udfSpawnUdfd(SUdfdData* pData) { char path[PATH_MAX] = {0}; if (tsProcPath == NULL) { path[0] = '.'; + #ifdef WINDOWS + GetModuleFileName(NULL, path, PATH_MAX); + taosDirName(path); + #endif } else { strncpy(path, tsProcPath, strlen(tsProcPath)); taosDirName(path); } #ifdef WINDOWS - strcat(path, "udfd.exe"); + if (strlen(path)==0) { + strcat(path, "udfd.exe"); + } else { + strcat(path, "\\udfd.exe"); + } #else strcat(path, "/udfd"); #endif @@ -413,6 +428,34 @@ enum { UDFC_STATE_STOPPING, // stopping after udfcClose }; +int32_t getUdfdPipeName(char* pipeName, int32_t size); +int32_t encodeUdfSetupRequest(void **buf, const SUdfSetupRequest *setup); +void* decodeUdfSetupRequest(const void* buf, SUdfSetupRequest *request); +int32_t encodeUdfInterBuf(void **buf, const SUdfInterBuf* state); +void* decodeUdfInterBuf(const void* buf, SUdfInterBuf* state); +int32_t 
encodeUdfCallRequest(void **buf, const SUdfCallRequest *call); +void* decodeUdfCallRequest(const void* buf, SUdfCallRequest* call); +int32_t encodeUdfTeardownRequest(void **buf, const SUdfTeardownRequest *teardown); +void* decodeUdfTeardownRequest(const void* buf, SUdfTeardownRequest *teardown); +int32_t encodeUdfRequest(void** buf, const SUdfRequest* request); +void* decodeUdfRequest(const void* buf, SUdfRequest* request); +int32_t encodeUdfSetupResponse(void **buf, const SUdfSetupResponse *setupRsp); +void* decodeUdfSetupResponse(const void* buf, SUdfSetupResponse* setupRsp); +int32_t encodeUdfCallResponse(void **buf, const SUdfCallResponse *callRsp); +void* decodeUdfCallResponse(const void* buf, SUdfCallResponse* callRsp); +int32_t encodeUdfTeardownResponse(void** buf, const SUdfTeardownResponse* teardownRsp); +void* decodeUdfTeardownResponse(const void* buf, SUdfTeardownResponse* teardownResponse); +int32_t encodeUdfResponse(void** buf, const SUdfResponse* rsp); +void* decodeUdfResponse(const void* buf, SUdfResponse* rsp); +void freeUdfColumnData(SUdfColumnData *data, SUdfColumnMeta *meta); +void freeUdfColumn(SUdfColumn* col); +void freeUdfDataDataBlock(SUdfDataBlock *block); +void freeUdfInterBuf(SUdfInterBuf *buf); +int32_t convertDataBlockToUdfDataBlock(SSDataBlock *block, SUdfDataBlock *udfBlock); +int32_t convertUdfColumnToDataBlock(SUdfColumn *udfCol, SSDataBlock *block); +int32_t convertScalarParamToDataBlock(SScalarParam *input, int32_t numOfCols, SSDataBlock *output); +int32_t convertDataBlockToScalarParm(SSDataBlock *input, SScalarParam *output); + int32_t getUdfdPipeName(char* pipeName, int32_t size) { char dnodeId[8] = {0}; size_t dnodeIdSize = sizeof(dnodeId); @@ -650,7 +693,7 @@ int32_t encodeUdfResponse(void** buf, const SUdfResponse* rsp) { len += encodeUdfTeardownResponse(buf, &rsp->teardownRsp); break; default: - //TODO: log error + fnError("encode udf response, invalid udf response type %d", rsp->type); break; } return len; @@ -676,7 +719,7 @@ void* decodeUdfResponse(const void* buf, SUdfResponse* rsp) { buf = decodeUdfTeardownResponse(buf, &rsp->teardownRsp); break; default: - //TODO: log error + fnError("decode udf response, invalid udf response type %d", rsp->type); break; } return (void*)buf; @@ -817,185 +860,495 @@ int32_t convertDataBlockToScalarParm(SSDataBlock *input, SScalarParam *output) { return 0; } -void onUdfcPipeClose(uv_handle_t *handle) { - SClientUvConn *conn = handle->data; - if (!QUEUE_EMPTY(&conn->taskQueue)) { - QUEUE* h = QUEUE_HEAD(&conn->taskQueue); - SClientUvTaskNode *task = QUEUE_DATA(h, SClientUvTaskNode, connTaskQueue); - task->errCode = 0; - QUEUE_REMOVE(&task->procTaskQueue); - uv_sem_post(&task->taskSem); - } - conn->session->udfUvPipe = NULL; - taosMemoryFree(conn->readBuf.buf); - taosMemoryFree(conn); - taosMemoryFree((uv_pipe_t *) handle); -} - -int32_t udfcGetUdfTaskResultFromUvTask(SClientUdfTask *task, SClientUvTaskNode *uvTask) { - fnDebug("udfc get uv task result. 
task: %p, uvTask: %p", task, uvTask); - if (uvTask->type == UV_TASK_REQ_RSP) { - if (uvTask->rspBuf.base != NULL) { - SUdfResponse rsp = {0}; - void* buf = decodeUdfResponse(uvTask->rspBuf.base, &rsp); - assert(uvTask->rspBuf.len == POINTER_DISTANCE(buf, uvTask->rspBuf.base)); - task->errCode = rsp.code; +////////////////////////////////////////////////////////////////////////////////////////////////////////////// +//memory layout |---SUdfAggRes----|-----final result-----|---inter result----| +typedef struct SUdfAggRes { + int8_t finalResNum; + int8_t interResNum; + char* finalResBuf; + char* interResBuf; +} SUdfAggRes; +void onUdfcPipeClose(uv_handle_t *handle); +int32_t udfcGetUdfTaskResultFromUvTask(SClientUdfTask *task, SClientUvTaskNode *uvTask); +void udfcAllocateBuffer(uv_handle_t *handle, size_t suggestedSize, uv_buf_t *buf); +bool isUdfcUvMsgComplete(SClientConnBuf *connBuf); +void udfcUvHandleRsp(SClientUvConn *conn); +void udfcUvHandleError(SClientUvConn *conn); +void onUdfcPipeRead(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf); +void onUdfcPipetWrite(uv_write_t *write, int status); +void onUdfcPipeConnect(uv_connect_t *connect, int status); +int32_t udfcCreateUvTask(SClientUdfTask *task, int8_t uvTaskType, SClientUvTaskNode **pUvTask); +int32_t udfcQueueUvTask(SClientUvTaskNode *uvTask); +int32_t udfcStartUvTask(SClientUvTaskNode *uvTask); +void udfcAsyncTaskCb(uv_async_t *async); +void cleanUpUvTasks(SUdfcProxy *udfc); +void udfStopAsyncCb(uv_async_t *async); +void constructUdfService(void *argsThread); +int32_t udfcRunUdfUvTask(SClientUdfTask *task, int8_t uvTaskType); +int32_t doSetupUdf(char udfName[], UdfcFuncHandle *funcHandle); +int compareUdfcFuncSub(const void* elem1, const void* elem2); +int32_t doTeardownUdf(UdfcFuncHandle handle); - switch (task->type) { - case UDF_TASK_SETUP: { - //TODO: copy or not - task->_setup.rsp = rsp.setupRsp; - break; - } - case UDF_TASK_CALL: { - task->_call.rsp = rsp.callRsp; - //TODO: copy or not - break; - } - case UDF_TASK_TEARDOWN: { - task->_teardown.rsp = rsp.teardownRsp; - //TODO: copy or not? 
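The memory-layout comment above (|---SUdfAggRes----|-----final result-----|---inter result----|) deserves a concrete reading: the header struct and both result areas share one allocation, and finalResBuf/interResBuf are plain offsets into it, as udfAggInit() computes further down. A small sketch of that pointer arithmetic with invented sizes (outputLen and bufSize here are illustrative stand-ins for the session fields):

#include <stdio.h>
#include <stdlib.h>

typedef struct {
  char *finalResBuf;  /* points just past the header */
  char *interResBuf;  /* points past the final-result area */
} AggRes;  /* simplified stand-in for SUdfAggRes */

int main(void) {
  size_t outputLen = 8;    /* illustrative: session->outputLen */
  size_t bufSize   = 128;  /* illustrative: session->bufSize   */

  /* One contiguous block: header | final result | intermediate state. */
  AggRes *res = calloc(1, sizeof(AggRes) + outputLen + bufSize);
  if (res == NULL) return 1;

  res->finalResBuf = (char *)res + sizeof(AggRes);
  res->interResBuf = (char *)res + sizeof(AggRes) + outputLen;

  printf("final at offset %zu, inter at offset %zu\n",
         (size_t)(res->finalResBuf - (char *)res),
         (size_t)(res->interResBuf - (char *)res));
  free(res);
  return 0;
}

Keeping everything in one block lets the executor clear or copy the whole state at once, which is what udfAggInit does with its single memset over envSize.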
- break; - } - default: { - break; - } - } +int32_t callUdf(UdfcFuncHandle handle, int8_t callType, SSDataBlock *input, SUdfInterBuf *state, SUdfInterBuf *state2, + SSDataBlock* output, SUdfInterBuf *newState); +int32_t doCallUdfAggInit(UdfcFuncHandle handle, SUdfInterBuf *interBuf); +int32_t doCallUdfAggProcess(UdfcFuncHandle handle, SSDataBlock *block, SUdfInterBuf *state, SUdfInterBuf *newState); +int32_t doCallUdfAggMerge(UdfcFuncHandle handle, SUdfInterBuf *interBuf1, SUdfInterBuf *interBuf2, SUdfInterBuf *resultBuf); +int32_t doCallUdfAggFinalize(UdfcFuncHandle handle, SUdfInterBuf *interBuf, SUdfInterBuf *resultData); +int32_t doCallUdfScalarFunc(UdfcFuncHandle handle, SScalarParam *input, int32_t numOfCols, SScalarParam* output); +int32_t callUdfScalarFunc(char *udfName, SScalarParam *input, int32_t numOfCols, SScalarParam *output); + +int32_t udfcOpen(); +int32_t udfcClose(); + +int32_t acquireUdfFuncHandle(char* udfName, UdfcFuncHandle* pHandle); +void releaseUdfFuncHandle(char* udfName); +int32_t cleanUpUdfs(); + +bool udfAggGetEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); +bool udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo* pResultCellInfo); +int32_t udfAggProcess(struct SqlFunctionCtx *pCtx); +int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock); - // TODO: the call buffer is setup and freed by udf invocation - taosMemoryFree(uvTask->rspBuf.base); - } else { - task->errCode = uvTask->errCode; - } - } else if (uvTask->type == UV_TASK_CONNECT) { - task->errCode = uvTask->errCode; - } else if (uvTask->type == UV_TASK_DISCONNECT) { - task->errCode = uvTask->errCode; - } - return 0; +int compareUdfcFuncSub(const void* elem1, const void* elem2) { + SUdfcFuncStub *stub1 = (SUdfcFuncStub *)elem1; + SUdfcFuncStub *stub2 = (SUdfcFuncStub *)elem2; + return strcmp(stub1->udfName, stub2->udfName); } -void udfcAllocateBuffer(uv_handle_t *handle, size_t suggestedSize, uv_buf_t *buf) { - SClientUvConn *conn = handle->data; - SClientConnBuf *connBuf = &conn->readBuf; - - int32_t msgHeadSize = sizeof(int32_t) + sizeof(int64_t); - if (connBuf->cap == 0) { - connBuf->buf = taosMemoryMalloc(msgHeadSize); - if (connBuf->buf) { - connBuf->len = 0; - connBuf->cap = msgHeadSize; - connBuf->total = -1; - - buf->base = connBuf->buf; - buf->len = connBuf->cap; +int32_t acquireUdfFuncHandle(char* udfName, UdfcFuncHandle* pHandle) { + int32_t code = 0; + uv_mutex_lock(&gUdfdProxy.udfStubsMutex); + SUdfcFuncStub key = {0}; + strcpy(key.udfName, udfName); + int32_t stubIndex = taosArraySearchIdx(gUdfdProxy.udfStubs, &key, compareUdfcFuncSub, TD_EQ); + if (stubIndex != -1) { + SUdfcFuncStub *foundStub = taosArrayGet(gUdfdProxy.udfStubs, stubIndex); + UdfcFuncHandle handle = foundStub->handle; + if (handle != NULL && ((SUdfcUvSession*)handle)->udfUvPipe != NULL) { + *pHandle = foundStub->handle; + ++foundStub->refCount; + foundStub->lastRefTime = taosGetTimestampUs(); + uv_mutex_unlock(&gUdfdProxy.udfStubsMutex); + return 0; } else { - fnError("udfc allocate buffer failure. size: %d", msgHeadSize); - buf->base = NULL; - buf->len = 0; + fnInfo("invalid handle for %s, refCount: %d, last ref time: %"PRId64". 
remove it from cache", + udfName, foundStub->refCount, foundStub->lastRefTime); + taosArrayRemove(gUdfdProxy.udfStubs, stubIndex); } + } + *pHandle = NULL; + code = doSetupUdf(udfName, pHandle); + if (code == TSDB_CODE_SUCCESS) { + SUdfcFuncStub stub = {0}; + strcpy(stub.udfName, udfName); + stub.handle = *pHandle; + ++stub.refCount; + stub.lastRefTime = taosGetTimestampUs(); + taosArrayPush(gUdfdProxy.udfStubs, &stub); + taosArraySort(gUdfdProxy.udfStubs, compareUdfcFuncSub); } else { - connBuf->cap = connBuf->total > connBuf->cap ? connBuf->total : connBuf->cap; - void *resultBuf = taosMemoryRealloc(connBuf->buf, connBuf->cap); - if (resultBuf) { - connBuf->buf = resultBuf; - buf->base = connBuf->buf + connBuf->len; - buf->len = connBuf->cap - connBuf->len; - } else { - fnError("udfc re-allocate buffer failure. size: %d", connBuf->cap); - buf->base = NULL; - buf->len = 0; - } + *pHandle = NULL; } - fnTrace("conn buf cap - len - total : %d - %d - %d", connBuf->cap, connBuf->len, connBuf->total); - + uv_mutex_unlock(&gUdfdProxy.udfStubsMutex); + return code; } -bool isUdfcUvMsgComplete(SClientConnBuf *connBuf) { - if (connBuf->total == -1 && connBuf->len >= sizeof(int32_t)) { - connBuf->total = *(int32_t *) (connBuf->buf); +void releaseUdfFuncHandle(char* udfName) { + uv_mutex_lock(&gUdfdProxy.udfStubsMutex); + SUdfcFuncStub key = {0}; + strcpy(key.udfName, udfName); + SUdfcFuncStub *foundStub = taosArraySearch(gUdfdProxy.udfStubs, &key, compareUdfcFuncSub, TD_EQ); + if (!foundStub) { + return; } - if (connBuf->len == connBuf->cap && connBuf->total == connBuf->cap) { - fnTrace("udfc complete message is received, now handle it"); - return true; + if (foundStub->refCount > 0) { + --foundStub->refCount; } - return false; + uv_mutex_unlock(&gUdfdProxy.udfStubsMutex); } -void udfcUvHandleRsp(SClientUvConn *conn) { - SClientConnBuf *connBuf = &conn->readBuf; - int64_t seqNum = *(int64_t *) (connBuf->buf + sizeof(int32_t)); // msglen then seqnum - - if (QUEUE_EMPTY(&conn->taskQueue)) { - fnError("udfc no task waiting for response on connection"); - return; - } - bool found = false; - SClientUvTaskNode *taskFound = NULL; - QUEUE* h = QUEUE_NEXT(&conn->taskQueue); - SClientUvTaskNode *task = QUEUE_DATA(h, SClientUvTaskNode, connTaskQueue); - - while (h != &conn->taskQueue) { - if (task->seqNum == seqNum) { - if (found == false) { - found = true; - taskFound = task; +int32_t cleanUpUdfs() { + uv_mutex_lock(&gUdfdProxy.udfStubsMutex); + int32_t i = 0; + SArray* udfStubs = taosArrayInit(16, sizeof(SUdfcFuncStub)); + while (i < taosArrayGetSize(gUdfdProxy.udfStubs)) { + SUdfcFuncStub *stub = taosArrayGet(gUdfdProxy.udfStubs, i); + if (stub->refCount == 0) { + fnInfo("tear down udf. udf name: %s, handle: %p, ref count: %d", stub->udfName, stub->handle, stub->refCount); + doTeardownUdf(stub->handle); + } else { + fnInfo("udf still in use. udf name: %s, ref count: %d, last ref time: %"PRId64", handle: %p", + stub->udfName, stub->refCount, stub->lastRefTime, stub->handle); + UdfcFuncHandle handle = stub->handle; + if (handle != NULL && ((SUdfcUvSession*)handle)->udfUvPipe != NULL) { + taosArrayPush(udfStubs, stub); } else { - fnError("udfc more than one task waiting for the same response"); - continue; + fnInfo("udf invalid handle for %s, refCount: %d, last ref time: %"PRId64". 
remove it from cache", + stub->udfName, stub->refCount, stub->lastRefTime); } } - h = QUEUE_NEXT(h); - task = QUEUE_DATA(h, SClientUvTaskNode, connTaskQueue); + ++i; } + taosArrayDestroy(gUdfdProxy.udfStubs); + gUdfdProxy.udfStubs = udfStubs; + uv_mutex_unlock(&gUdfdProxy.udfStubsMutex); + return 0; +} - if (taskFound) { - taskFound->rspBuf = uv_buf_init(connBuf->buf, connBuf->len); - QUEUE_REMOVE(&taskFound->connTaskQueue); - QUEUE_REMOVE(&taskFound->procTaskQueue); - uv_sem_post(&taskFound->taskSem); +int32_t callUdfScalarFunc(char *udfName, SScalarParam *input, int32_t numOfCols, SScalarParam *output) { + UdfcFuncHandle handle = NULL; + int32_t code = acquireUdfFuncHandle(udfName, &handle); + if (code != 0) { + return code; + } + SUdfcUvSession *session = handle; + code = doCallUdfScalarFunc(handle, input, numOfCols, output); + if (output->columnData == NULL) { + fnError("udfc scalar function calculate error. no column data"); + code = TSDB_CODE_UDF_INVALID_OUTPUT_TYPE; } else { - fnError("no task is waiting for the response."); + if (session->outputType != output->columnData->info.type || session->outputLen != output->columnData->info.bytes) { + fnError("udfc scalar function calculate error. type mismatch. session type: %d(%d), output type: %d(%d)", session->outputType, + session->outputLen, output->columnData->info.type, output->columnData->info.bytes); + code = TSDB_CODE_UDF_INVALID_OUTPUT_TYPE; + } } - connBuf->buf = NULL; - connBuf->total = -1; - connBuf->len = 0; - connBuf->cap = 0; + releaseUdfFuncHandle(udfName); + return code; } -void udfcUvHandleError(SClientUvConn *conn) { - while (!QUEUE_EMPTY(&conn->taskQueue)) { - QUEUE* h = QUEUE_HEAD(&conn->taskQueue); - SClientUvTaskNode *task = QUEUE_DATA(h, SClientUvTaskNode, connTaskQueue); - task->errCode = TSDB_CODE_UDF_PIPE_READ_ERR; - QUEUE_REMOVE(&task->connTaskQueue); - QUEUE_REMOVE(&task->procTaskQueue); - uv_sem_post(&task->taskSem); +bool udfAggGetEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv) { + if (fmIsScalarFunc(pFunc->funcId)) { + return false; } - - uv_close((uv_handle_t *) conn->pipe, onUdfcPipeClose); + pEnv->calcMemSize = sizeof(SUdfAggRes) + pFunc->node.resType.bytes + pFunc->udfBufSize; + return true; } -void onUdfcPipeRead(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) { - fnTrace("udfc client %p, client read from pipe. nread: %zd", client, nread); - if (nread == 0) return; - - SClientUvConn *conn = client->data; - SClientConnBuf *connBuf = &conn->readBuf; - if (nread > 0) { - connBuf->len += nread; - if (isUdfcUvMsgComplete(connBuf)) { - udfcUvHandleRsp(conn); - } - +bool udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo* pResultCellInfo) { + if (functionSetup(pCtx, pResultCellInfo) != true) { + return false; } - if (nread < 0) { - fnError("udfc client pipe %p read error: %zd, %s.", client, nread, uv_strerror(nread)); - if (nread == UV_EOF) { - fnError("\tudfc client pipe %p closed", client); - } - udfcUvHandleError(conn); + UdfcFuncHandle handle; + int32_t udfCode = 0; + if ((udfCode = acquireUdfFuncHandle((char *)pCtx->udfName, &handle)) != 0) { + fnError("udfAggInit error. step doSetupUdf. 
udf code: %d", udfCode); + return false; + } + SUdfcUvSession *session = (SUdfcUvSession *)handle; + SUdfAggRes *udfRes = (SUdfAggRes*)GET_ROWCELL_INTERBUF(pResultCellInfo); + int32_t envSize = sizeof(SUdfAggRes) + session->outputLen + session->bufSize; + memset(udfRes, 0, envSize); + + udfRes->finalResBuf = (char*)udfRes + sizeof(SUdfAggRes); + udfRes->interResBuf = (char*)udfRes + sizeof(SUdfAggRes) + session->outputLen; + + SUdfInterBuf buf = {0}; + if ((udfCode = doCallUdfAggInit(handle, &buf)) != 0) { + fnError("udfAggInit error. step doCallUdfAggInit. udf code: %d", udfCode); + releaseUdfFuncHandle(pCtx->udfName); + return false; + } + udfRes->interResNum = buf.numOfResult; + if (buf.bufLen <= session->bufSize) { + memcpy(udfRes->interResBuf, buf.buf, buf.bufLen); + } else { + fnError("udfc inter buf size %d is greater than function bufSize %d", buf.bufLen, session->bufSize); + releaseUdfFuncHandle(pCtx->udfName); + return false; + } + releaseUdfFuncHandle(pCtx->udfName); + freeUdfInterBuf(&buf); + return true; +} + +int32_t udfAggProcess(struct SqlFunctionCtx *pCtx) { + int32_t udfCode = 0; + UdfcFuncHandle handle = 0; + if ((udfCode = acquireUdfFuncHandle((char *)pCtx->udfName, &handle)) != 0) { + fnError("udfAggProcess error. step acquireUdfFuncHandle. udf code: %d", udfCode); + return udfCode; + } + + SUdfcUvSession *session = handle; + SUdfAggRes* udfRes = (SUdfAggRes *)GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + udfRes->finalResBuf = (char*)udfRes + sizeof(SUdfAggRes); + udfRes->interResBuf = (char*)udfRes + sizeof(SUdfAggRes) + session->outputLen; + + SInputColumnInfoData* pInput = &pCtx->input; + int32_t numOfCols = pInput->numOfInputCols; + int32_t start = pInput->startRowIndex; + int32_t numOfRows = pInput->numOfRows; + + + SSDataBlock tempBlock = {0}; + tempBlock.info.numOfCols = numOfCols; + tempBlock.info.rows = pInput->totalRows; + tempBlock.info.uid = pInput->uid; + bool hasVarCol = false; + tempBlock.pDataBlock = taosArrayInit(numOfCols, sizeof(SColumnInfoData)); + + for (int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData *col = pInput->pData[i]; + if (IS_VAR_DATA_TYPE(col->info.type)) { + hasVarCol = true; + } + taosArrayPush(tempBlock.pDataBlock, col); + } + tempBlock.info.hasVarCol = hasVarCol; + + SSDataBlock *inputBlock = blockDataExtractBlock(&tempBlock, start, numOfRows); + + SUdfInterBuf state = {.buf = udfRes->interResBuf, + .bufLen = session->bufSize, + .numOfResult = udfRes->interResNum}; + SUdfInterBuf newState = {0}; + + udfCode = doCallUdfAggProcess(session, inputBlock, &state, &newState); + if (udfCode != 0) { + fnError("udfAggProcess error. code: %d", udfCode); + newState.numOfResult = 0; + } else { + udfRes->interResNum = newState.numOfResult; + if (newState.bufLen <= session->bufSize) { + memcpy(udfRes->interResBuf, newState.buf, newState.bufLen); + } else { + fnError("udfc inter buf size %d is greater than function bufSize %d", newState.bufLen, session->bufSize); + udfCode = TSDB_CODE_UDF_INVALID_BUFSIZE; + } + } + if (newState.numOfResult == 1 || state.numOfResult == 1) { + GET_RES_INFO(pCtx)->numOfRes = 1; + } + + blockDataDestroy(inputBlock); + taosArrayDestroy(tempBlock.pDataBlock); + + releaseUdfFuncHandle(pCtx->udfName); + freeUdfInterBuf(&newState); + return udfCode; +} + +int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock) { + int32_t udfCode = 0; + UdfcFuncHandle handle = 0; + if ((udfCode = acquireUdfFuncHandle((char *)pCtx->udfName, &handle)) != 0) { + fnError("udfAggProcess error. step acquireUdfFuncHandle. 
udf code: %d", udfCode); + return udfCode; + } + + SUdfcUvSession *session = handle; + SUdfAggRes* udfRes = (SUdfAggRes *)GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + udfRes->finalResBuf = (char*)udfRes + sizeof(SUdfAggRes); + udfRes->interResBuf = (char*)udfRes + sizeof(SUdfAggRes) + session->outputLen; + + + SUdfInterBuf resultBuf = {0}; + SUdfInterBuf state = {.buf = udfRes->interResBuf, + .bufLen = session->bufSize, + .numOfResult = udfRes->interResNum}; + int32_t udfCallCode= 0; + udfCallCode= doCallUdfAggFinalize(session, &state, &resultBuf); + if (udfCallCode != 0) { + fnError("udfAggFinalize error. doCallUdfAggFinalize step. udf code:%d", udfCallCode); + GET_RES_INFO(pCtx)->numOfRes = 0; + } else { + if (resultBuf.bufLen <= session->outputLen) { + memcpy(udfRes->finalResBuf, resultBuf.buf, session->outputLen); + udfRes->finalResNum = resultBuf.numOfResult; + GET_RES_INFO(pCtx)->numOfRes = udfRes->finalResNum; + } else { + fnError("udfc inter buf size %d is greater than function output size %d", resultBuf.bufLen, session->outputLen); + GET_RES_INFO(pCtx)->numOfRes = 0; + udfCallCode = TSDB_CODE_UDF_INVALID_OUTPUT_TYPE; + } + } + + freeUdfInterBuf(&resultBuf); + + int32_t numOfResults = functionFinalizeWithResultBuf(pCtx, pBlock, udfRes->finalResBuf); + releaseUdfFuncHandle(pCtx->udfName); + return udfCallCode == 0 ? numOfResults : udfCallCode; +} + +void onUdfcPipeClose(uv_handle_t *handle) { + SClientUvConn *conn = handle->data; + if (!QUEUE_EMPTY(&conn->taskQueue)) { + QUEUE* h = QUEUE_HEAD(&conn->taskQueue); + SClientUvTaskNode *task = QUEUE_DATA(h, SClientUvTaskNode, connTaskQueue); + task->errCode = 0; + QUEUE_REMOVE(&task->procTaskQueue); + uv_sem_post(&task->taskSem); + } + conn->session->udfUvPipe = NULL; + taosMemoryFree(conn->readBuf.buf); + taosMemoryFree(conn); + taosMemoryFree((uv_pipe_t *) handle); +} + +int32_t udfcGetUdfTaskResultFromUvTask(SClientUdfTask *task, SClientUvTaskNode *uvTask) { + fnDebug("udfc get uv task result. task: %p, uvTask: %p", task, uvTask); + if (uvTask->type == UV_TASK_REQ_RSP) { + if (uvTask->rspBuf.base != NULL) { + SUdfResponse rsp = {0}; + void* buf = decodeUdfResponse(uvTask->rspBuf.base, &rsp); + assert(uvTask->rspBuf.len == POINTER_DISTANCE(buf, uvTask->rspBuf.base)); + task->errCode = rsp.code; + + switch (task->type) { + case UDF_TASK_SETUP: { + task->_setup.rsp = rsp.setupRsp; + break; + } + case UDF_TASK_CALL: { + task->_call.rsp = rsp.callRsp; + break; + } + case UDF_TASK_TEARDOWN: { + task->_teardown.rsp = rsp.teardownRsp; + break; + } + default: { + break; + } + } + + // TODO: the call buffer is setup and freed by udf invocation + taosMemoryFree(uvTask->rspBuf.base); + } else { + task->errCode = uvTask->errCode; + } + } else if (uvTask->type == UV_TASK_CONNECT) { + task->errCode = uvTask->errCode; + } else if (uvTask->type == UV_TASK_DISCONNECT) { + task->errCode = uvTask->errCode; + } + return 0; +} + +void udfcAllocateBuffer(uv_handle_t *handle, size_t suggestedSize, uv_buf_t *buf) { + SClientUvConn *conn = handle->data; + SClientConnBuf *connBuf = &conn->readBuf; + + int32_t msgHeadSize = sizeof(int32_t) + sizeof(int64_t); + if (connBuf->cap == 0) { + connBuf->buf = taosMemoryMalloc(msgHeadSize); + if (connBuf->buf) { + connBuf->len = 0; + connBuf->cap = msgHeadSize; + connBuf->total = -1; + + buf->base = connBuf->buf; + buf->len = connBuf->cap; + } else { + fnError("udfc allocate buffer failure. size: %d", msgHeadSize); + buf->base = NULL; + buf->len = 0; + } + } else { + connBuf->cap = connBuf->total > connBuf->cap ? 
connBuf->total : connBuf->cap; + void *resultBuf = taosMemoryRealloc(connBuf->buf, connBuf->cap); + if (resultBuf) { + connBuf->buf = resultBuf; + buf->base = connBuf->buf + connBuf->len; + buf->len = connBuf->cap - connBuf->len; + } else { + fnError("udfc re-allocate buffer failure. size: %d", connBuf->cap); + buf->base = NULL; + buf->len = 0; + } + } + + fnTrace("conn buf cap - len - total : %d - %d - %d", connBuf->cap, connBuf->len, connBuf->total); + +} + +bool isUdfcUvMsgComplete(SClientConnBuf *connBuf) { + if (connBuf->total == -1 && connBuf->len >= sizeof(int32_t)) { + connBuf->total = *(int32_t *) (connBuf->buf); + } + if (connBuf->len == connBuf->cap && connBuf->total == connBuf->cap) { + fnTrace("udfc complete message is received, now handle it"); + return true; + } + return false; +} + +void udfcUvHandleRsp(SClientUvConn *conn) { + SClientConnBuf *connBuf = &conn->readBuf; + int64_t seqNum = *(int64_t *) (connBuf->buf + sizeof(int32_t)); // msglen then seqnum + + if (QUEUE_EMPTY(&conn->taskQueue)) { + fnError("udfc no task waiting for response on connection"); + return; + } + bool found = false; + SClientUvTaskNode *taskFound = NULL; + QUEUE* h = QUEUE_NEXT(&conn->taskQueue); + SClientUvTaskNode *task = QUEUE_DATA(h, SClientUvTaskNode, connTaskQueue); + + while (h != &conn->taskQueue) { + if (task->seqNum == seqNum) { + if (found == false) { + found = true; + taskFound = task; + } else { + fnError("udfc more than one task waiting for the same response"); + continue; + } + } + h = QUEUE_NEXT(h); + task = QUEUE_DATA(h, SClientUvTaskNode, connTaskQueue); + } + + if (taskFound) { + taskFound->rspBuf = uv_buf_init(connBuf->buf, connBuf->len); + QUEUE_REMOVE(&taskFound->connTaskQueue); + QUEUE_REMOVE(&taskFound->procTaskQueue); + uv_sem_post(&taskFound->taskSem); + } else { + fnError("no task is waiting for the response."); + } + connBuf->buf = NULL; + connBuf->total = -1; + connBuf->len = 0; + connBuf->cap = 0; +} + +void udfcUvHandleError(SClientUvConn *conn) { + while (!QUEUE_EMPTY(&conn->taskQueue)) { + QUEUE* h = QUEUE_HEAD(&conn->taskQueue); + SClientUvTaskNode *task = QUEUE_DATA(h, SClientUvTaskNode, connTaskQueue); + task->errCode = TSDB_CODE_UDF_PIPE_READ_ERR; + QUEUE_REMOVE(&task->connTaskQueue); + QUEUE_REMOVE(&task->procTaskQueue); + uv_sem_post(&task->taskSem); + } + + uv_close((uv_handle_t *) conn->pipe, onUdfcPipeClose); +} + +void onUdfcPipeRead(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) { + fnTrace("udfc client %p, client read from pipe. 
nread: %zd", client, nread); + if (nread == 0) return; + + SClientUvConn *conn = client->data; + SClientConnBuf *connBuf = &conn->readBuf; + if (nread > 0) { + connBuf->len += nread; + if (isUdfcUvMsgComplete(connBuf)) { + udfcUvHandleRsp(conn); + } + + } + if (nread < 0) { + fnError("udfc client pipe %p read error: %zd, %s.", client, nread, uv_strerror(nread)); + if (nread == UV_EOF) { + fnError("\tudfc client pipe %p closed", client); + } + udfcUvHandleError(conn); } } @@ -1050,7 +1403,7 @@ int32_t udfcCreateUvTask(SClientUdfTask *task, int8_t uvTaskType, SClientUvTaskN request.teardown = task->_teardown.req; request.type = UDF_TASK_TEARDOWN; } else { - //TODO log and return error + fnError("udfc create uv task, invalid task type : %d", task->type); } int32_t bufLen = encodeUdfRequest(NULL, &request); request.msgLen = bufLen; @@ -1271,134 +1624,47 @@ int32_t udfcRunUdfUvTask(SClientUdfTask *task, int8_t uvTaskType) { SClientUvConn *conn = uvTask->pipe->data; conn->session = task->session; } - taosMemoryFree(uvTask); - uvTask = NULL; - return task->errCode; -} - -int32_t doSetupUdf(char udfName[], UdfcFuncHandle *funcHandle) { - if (gUdfdProxy.udfcState != UDFC_STATE_READY) { - return TSDB_CODE_UDF_INVALID_STATE; - } - SClientUdfTask *task = taosMemoryCalloc(1,sizeof(SClientUdfTask)); - task->errCode = 0; - task->session = taosMemoryCalloc(1, sizeof(SUdfcUvSession)); - task->session->udfc = &gUdfdProxy; - task->type = UDF_TASK_SETUP; - - SUdfSetupRequest *req = &task->_setup.req; - strncpy(req->udfName, udfName, TSDB_FUNC_NAME_LEN); - - int32_t errCode = udfcRunUdfUvTask(task, UV_TASK_CONNECT); - if (errCode != 0) { - fnError("failed to connect to pipe. udfName: %s, pipe: %s", udfName, (&gUdfdProxy)->udfdPipeName); - return TSDB_CODE_UDF_PIPE_CONNECT_ERR; - } - - udfcRunUdfUvTask(task, UV_TASK_REQ_RSP); - - SUdfSetupResponse *rsp = &task->_setup.rsp; - task->session->severHandle = rsp->udfHandle; - task->session->outputType = rsp->outputType; - task->session->outputLen = rsp->outputLen; - task->session->bufSize = rsp->bufSize; - strcpy(task->session->udfName, udfName); - if (task->errCode != 0) { - fnError("failed to setup udf. udfname: %s, err: %d", udfName, task->errCode) - } else { - fnInfo("sucessfully setup udf func handle. udfName: %s, handle: %p", udfName, task->session); - *funcHandle = task->session; - } - int32_t err = task->errCode; - taosMemoryFree(task); - return err; -} - -int compareUdfcFuncSub(const void* elem1, const void* elem2) { - SUdfcFuncStub *stub1 = (SUdfcFuncStub *)elem1; - SUdfcFuncStub *stub2 = (SUdfcFuncStub *)elem2; - return strcmp(stub1->udfName, stub2->udfName); -} - -int32_t acquireUdfFuncHandle(char* udfName, UdfcFuncHandle* pHandle) { - int32_t code = 0; - uv_mutex_lock(&gUdfdProxy.udfStubsMutex); - SUdfcFuncStub key = {0}; - strcpy(key.udfName, udfName); - int32_t stubIndex = taosArraySearchIdx(gUdfdProxy.udfStubs, &key, compareUdfcFuncSub, TD_EQ); - if (stubIndex != -1) { - SUdfcFuncStub *foundStub = taosArrayGet(gUdfdProxy.udfStubs, stubIndex); - UdfcFuncHandle handle = foundStub->handle; - if (handle != NULL && ((SUdfcUvSession*)handle)->udfUvPipe != NULL) { - *pHandle = foundStub->handle; - ++foundStub->refCount; - foundStub->lastRefTime = taosGetTimestampUs(); - uv_mutex_unlock(&gUdfdProxy.udfStubsMutex); - return 0; - } else { - fnInfo("invalid handle for %s, refCount: %d, last ref time: %"PRId64". 
remove it from cache", - udfName, foundStub->refCount, foundStub->lastRefTime); - taosArrayRemove(gUdfdProxy.udfStubs, stubIndex); - } - } - *pHandle = NULL; - code = doSetupUdf(udfName, pHandle); - if (code == TSDB_CODE_SUCCESS) { - SUdfcFuncStub stub = {0}; - strcpy(stub.udfName, udfName); - stub.handle = *pHandle; - ++stub.refCount; - stub.lastRefTime = taosGetTimestampUs(); - taosArrayPush(gUdfdProxy.udfStubs, &stub); - taosArraySort(gUdfdProxy.udfStubs, compareUdfcFuncSub); - } else { - *pHandle = NULL; - } - - uv_mutex_unlock(&gUdfdProxy.udfStubsMutex); - return code; + taosMemoryFree(uvTask); + uvTask = NULL; + return task->errCode; } -void releaseUdfFuncHandle(char* udfName) { - uv_mutex_lock(&gUdfdProxy.udfStubsMutex); - SUdfcFuncStub key = {0}; - strcpy(key.udfName, udfName); - SUdfcFuncStub *foundStub = taosArraySearch(gUdfdProxy.udfStubs, &key, compareUdfcFuncSub, TD_EQ); - if (!foundStub) { - return; +int32_t doSetupUdf(char udfName[], UdfcFuncHandle *funcHandle) { + if (gUdfdProxy.udfcState != UDFC_STATE_READY) { + return TSDB_CODE_UDF_INVALID_STATE; } - if (foundStub->refCount > 0) { - --foundStub->refCount; + SClientUdfTask *task = taosMemoryCalloc(1,sizeof(SClientUdfTask)); + task->errCode = 0; + task->session = taosMemoryCalloc(1, sizeof(SUdfcUvSession)); + task->session->udfc = &gUdfdProxy; + task->type = UDF_TASK_SETUP; + + SUdfSetupRequest *req = &task->_setup.req; + strncpy(req->udfName, udfName, TSDB_FUNC_NAME_LEN); + + int32_t errCode = udfcRunUdfUvTask(task, UV_TASK_CONNECT); + if (errCode != 0) { + fnError("failed to connect to pipe. udfName: %s, pipe: %s", udfName, (&gUdfdProxy)->udfdPipeName); + return TSDB_CODE_UDF_PIPE_CONNECT_ERR; } - uv_mutex_unlock(&gUdfdProxy.udfStubsMutex); -} -int32_t cleanUpUdfs() { - uv_mutex_lock(&gUdfdProxy.udfStubsMutex); - int32_t i = 0; - SArray* udfStubs = taosArrayInit(16, sizeof(SUdfcFuncStub)); - while (i < taosArrayGetSize(gUdfdProxy.udfStubs)) { - SUdfcFuncStub *stub = taosArrayGet(gUdfdProxy.udfStubs, i); - if (stub->refCount == 0) { - fnInfo("tear down udf. udf name: %s, handle: %p, ref count: %d", stub->udfName, stub->handle, stub->refCount); - doTeardownUdf(stub->handle); - } else { - fnInfo("udf still in use. udf name: %s, ref count: %d, last ref time: %"PRId64", handle: %p", - stub->udfName, stub->refCount, stub->lastRefTime, stub->handle); - UdfcFuncHandle handle = stub->handle; - if (handle != NULL && ((SUdfcUvSession*)handle)->udfUvPipe != NULL) { - taosArrayPush(udfStubs, stub); - } else { - fnInfo("udf invalid handle for %s, refCount: %d, last ref time: %"PRId64". remove it from cache", - stub->udfName, stub->refCount, stub->lastRefTime); - } - } - ++i; + udfcRunUdfUvTask(task, UV_TASK_REQ_RSP); + + SUdfSetupResponse *rsp = &task->_setup.rsp; + task->session->severHandle = rsp->udfHandle; + task->session->outputType = rsp->outputType; + task->session->outputLen = rsp->outputLen; + task->session->bufSize = rsp->bufSize; + strcpy(task->session->udfName, udfName); + if (task->errCode != 0) { + fnError("failed to setup udf. udfname: %s, err: %d", udfName, task->errCode) + } else { + fnInfo("sucessfully setup udf func handle. 
udfName: %s, handle: %p", udfName, task->session); + *funcHandle = task->session; } - taosArrayDestroy(gUdfdProxy.udfStubs); - gUdfdProxy.udfStubs = udfStubs; - uv_mutex_unlock(&gUdfdProxy.udfStubsMutex); - return 0; + int32_t err = task->errCode; + taosMemoryFree(task); + return err; } int32_t callUdf(UdfcFuncHandle handle, int8_t callType, SSDataBlock *input, SUdfInterBuf *state, SUdfInterBuf *state2, @@ -1524,29 +1790,6 @@ int32_t doCallUdfScalarFunc(UdfcFuncHandle handle, SScalarParam *input, int32_t return err; } - -int32_t callUdfScalarFunc(char *udfName, SScalarParam *input, int32_t numOfCols, SScalarParam *output) { - UdfcFuncHandle handle = NULL; - int32_t code = acquireUdfFuncHandle(udfName, &handle); - if (code != 0) { - return code; - } - SUdfcUvSession *session = handle; - code = doCallUdfScalarFunc(handle, input, numOfCols, output); - if (output->columnData == NULL) { - fnError("udfc scalar function calculate error. no column data"); - code = TSDB_CODE_UDF_INVALID_OUTPUT_TYPE; - } else { - if (session->outputType != output->columnData->info.type || session->outputLen != output->columnData->info.bytes) { - fnError("udfc scalar function calculate error. type mismatch. session type: %d(%d), output type: %d(%d)", session->outputType, - session->outputLen, output->columnData->info.type, output->columnData->info.bytes); - code = TSDB_CODE_UDF_INVALID_OUTPUT_TYPE; - } - } - releaseUdfFuncHandle(udfName); - return code; -} - int32_t doTeardownUdf(UdfcFuncHandle handle) { SUdfcUvSession *session = (SUdfcUvSession *) handle; @@ -1576,165 +1819,3 @@ int32_t doTeardownUdf(UdfcFuncHandle handle) { return err; } - -//memory layout |---SUdfAggRes----|-----final result-----|---inter result----| -typedef struct SUdfAggRes { - int8_t finalResNum; - int8_t interResNum; - char* finalResBuf; - char* interResBuf; -} SUdfAggRes; - -bool udfAggGetEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv) { - if (fmIsScalarFunc(pFunc->funcId)) { - return false; - } - pEnv->calcMemSize = sizeof(SUdfAggRes) + pFunc->node.resType.bytes + pFunc->udfBufSize; - return true; -} - -bool udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo* pResultCellInfo) { - if (functionSetup(pCtx, pResultCellInfo) != true) { - return false; - } - UdfcFuncHandle handle; - int32_t udfCode = 0; - if ((udfCode = acquireUdfFuncHandle((char *)pCtx->udfName, &handle)) != 0) { - fnError("udfAggInit error. step doSetupUdf. udf code: %d", udfCode); - return false; - } - SUdfcUvSession *session = (SUdfcUvSession *)handle; - SUdfAggRes *udfRes = (SUdfAggRes*)GET_ROWCELL_INTERBUF(pResultCellInfo); - int32_t envSize = sizeof(SUdfAggRes) + session->outputLen + session->bufSize; - memset(udfRes, 0, envSize); - - udfRes->finalResBuf = (char*)udfRes + sizeof(SUdfAggRes); - udfRes->interResBuf = (char*)udfRes + sizeof(SUdfAggRes) + session->outputLen; - - SUdfInterBuf buf = {0}; - if ((udfCode = doCallUdfAggInit(handle, &buf)) != 0) { - fnError("udfAggInit error. step doCallUdfAggInit. 
udf code: %d", udfCode); - releaseUdfFuncHandle(pCtx->udfName); - return false; - } - udfRes->interResNum = buf.numOfResult; - if (buf.bufLen <= session->bufSize) { - memcpy(udfRes->interResBuf, buf.buf, buf.bufLen); - } else { - fnError("udfc inter buf size %d is greater than function bufSize %d", buf.bufLen, session->bufSize); - releaseUdfFuncHandle(pCtx->udfName); - return false; - } - releaseUdfFuncHandle(pCtx->udfName); - freeUdfInterBuf(&buf); - return true; -} - -int32_t udfAggProcess(struct SqlFunctionCtx *pCtx) { - int32_t udfCode = 0; - UdfcFuncHandle handle = 0; - if ((udfCode = acquireUdfFuncHandle((char *)pCtx->udfName, &handle)) != 0) { - fnError("udfAggProcess error. step acquireUdfFuncHandle. udf code: %d", udfCode); - return udfCode; - } - - SUdfcUvSession *session = handle; - SUdfAggRes* udfRes = (SUdfAggRes *)GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); - udfRes->finalResBuf = (char*)udfRes + sizeof(SUdfAggRes); - udfRes->interResBuf = (char*)udfRes + sizeof(SUdfAggRes) + session->outputLen; - - SInputColumnInfoData* pInput = &pCtx->input; - int32_t numOfCols = pInput->numOfInputCols; - int32_t start = pInput->startRowIndex; - int32_t numOfRows = pInput->numOfRows; - - - SSDataBlock tempBlock = {0}; - tempBlock.info.numOfCols = numOfCols; - tempBlock.info.rows = pInput->totalRows; - tempBlock.info.uid = pInput->uid; - bool hasVarCol = false; - tempBlock.pDataBlock = taosArrayInit(numOfCols, sizeof(SColumnInfoData)); - - for (int32_t i = 0; i < numOfCols; ++i) { - SColumnInfoData *col = pInput->pData[i]; - if (IS_VAR_DATA_TYPE(col->info.type)) { - hasVarCol = true; - } - taosArrayPush(tempBlock.pDataBlock, col); - } - tempBlock.info.hasVarCol = hasVarCol; - - SSDataBlock *inputBlock = blockDataExtractBlock(&tempBlock, start, numOfRows); - - SUdfInterBuf state = {.buf = udfRes->interResBuf, - .bufLen = session->bufSize, - .numOfResult = udfRes->interResNum}; - SUdfInterBuf newState = {0}; - - udfCode = doCallUdfAggProcess(session, inputBlock, &state, &newState); - if (udfCode != 0) { - fnError("udfAggProcess error. code: %d", udfCode); - newState.numOfResult = 0; - } else { - udfRes->interResNum = newState.numOfResult; - if (newState.bufLen <= session->bufSize) { - memcpy(udfRes->interResBuf, newState.buf, newState.bufLen); - } else { - fnError("udfc inter buf size %d is greater than function bufSize %d", newState.bufLen, session->bufSize); - udfCode = TSDB_CODE_UDF_INVALID_BUFSIZE; - } - } - if (newState.numOfResult == 1 || state.numOfResult == 1) { - GET_RES_INFO(pCtx)->numOfRes = 1; - } - - blockDataDestroy(inputBlock); - taosArrayDestroy(tempBlock.pDataBlock); - - releaseUdfFuncHandle(pCtx->udfName); - freeUdfInterBuf(&newState); - return udfCode; -} - -int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock) { - int32_t udfCode = 0; - UdfcFuncHandle handle = 0; - if ((udfCode = acquireUdfFuncHandle((char *)pCtx->udfName, &handle)) != 0) { - fnError("udfAggProcess error. step acquireUdfFuncHandle. 
udf code: %d", udfCode); - return udfCode; - } - - SUdfcUvSession *session = handle; - SUdfAggRes* udfRes = (SUdfAggRes *)GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); - udfRes->finalResBuf = (char*)udfRes + sizeof(SUdfAggRes); - udfRes->interResBuf = (char*)udfRes + sizeof(SUdfAggRes) + session->outputLen; - - - SUdfInterBuf resultBuf = {0}; - SUdfInterBuf state = {.buf = udfRes->interResBuf, - .bufLen = session->bufSize, - .numOfResult = udfRes->interResNum}; - int32_t udfCallCode= 0; - udfCallCode= doCallUdfAggFinalize(session, &state, &resultBuf); - if (udfCallCode != 0) { - fnError("udfAggFinalize error. doCallUdfAggFinalize step. udf code:%d", udfCallCode); - GET_RES_INFO(pCtx)->numOfRes = 0; - } else { - if (resultBuf.bufLen <= session->outputLen) { - memcpy(udfRes->finalResBuf, resultBuf.buf, session->outputLen); - udfRes->finalResNum = resultBuf.numOfResult; - GET_RES_INFO(pCtx)->numOfRes = udfRes->finalResNum; - } else { - fnError("udfc inter buf size %d is greater than function output size %d", resultBuf.bufLen, session->outputLen); - GET_RES_INFO(pCtx)->numOfRes = 0; - udfCallCode = TSDB_CODE_UDF_INVALID_OUTPUT_TYPE; - } - } - - freeUdfInterBuf(&resultBuf); - - int32_t numOfResults = functionFinalizeWithResultBuf(pCtx, pBlock, udfRes->finalResBuf); - releaseUdfFuncHandle(pCtx->udfName); - return udfCallCode == 0 ? numOfResults : udfCallCode; -} diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c index 9185f707116b54c8493843a3a71bd343ae4ac4d0..83dcb6d7f010936f46e1f9d375be517d8d06432b 100644 --- a/source/libs/function/src/udfd.c +++ b/source/libs/function/src/udfd.c @@ -103,177 +103,66 @@ typedef struct SUdfdRpcSendRecvInfo { uv_sem_t resultSem; } SUdfdRpcSendRecvInfo; -void udfdProcessRpcRsp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) { - SUdfdRpcSendRecvInfo *msgInfo = (SUdfdRpcSendRecvInfo *)pMsg->info.ahandle; - ASSERT(pMsg->info.ahandle != NULL); - - if (pEpSet) { - if (!isEpsetEqual(&global.mgmtEp.epSet, pEpSet)) { - updateEpSet_s(&global.mgmtEp, pEpSet); - } - } +static void udfdProcessRpcRsp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet); +static int32_t udfdFillUdfInfoFromMNode(void *clientRpc, char *udfName, SUdf *udf); +static int32_t udfdConnectToMnode(); +static int32_t udfdLoadUdf(char *udfName, SUdf *udf); +static bool udfdRpcRfp(int32_t code); +static int initEpSetFromCfg(const char *firstEp, const char *secondEp, SCorEpSet *pEpSet); +static int32_t udfdOpenClientRpc(); +static int32_t udfdCloseClientRpc(); + +static void udfdProcessSetupRequest(SUvUdfWork *uvUdf, SUdfRequest *request); +static void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request); +static void udfdProcessTeardownRequest(SUvUdfWork *uvUdf, SUdfRequest *request); +static void udfdProcessRequest(uv_work_t *req); +static void udfdOnWrite(uv_write_t *req, int status); +static void udfdSendResponse(uv_work_t *work, int status); +static void udfdAllocBuffer(uv_handle_t *handle, size_t suggestedSize, uv_buf_t *buf); +static bool isUdfdUvMsgComplete(SUdfdUvConn *pipe); +static void udfdHandleRequest(SUdfdUvConn *conn); +static void udfdPipeCloseCb(uv_handle_t *pipe); +static void udfdUvHandleError(SUdfdUvConn *conn) { uv_close((uv_handle_t *)conn->client, udfdPipeCloseCb); } +static void udfdPipeRead(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf); +static void udfdOnNewConnection(uv_stream_t *server, int status); + +static void udfdIntrSignalHandler(uv_signal_t *handle, int signum); +static int32_t removeListeningPipe(); + +static void udfdPrintVersion(); 
+static int32_t udfdParseArgs(int32_t argc, char *argv[]); +static int32_t udfdInitLog(); + +static void udfdCtrlAllocBufCb(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf); +static void udfdCtrlReadCb(uv_stream_t *q, ssize_t nread, const uv_buf_t *buf); +static int32_t udfdUvInit(); +static void udfdCloseWalkCb(uv_handle_t *handle, void *arg); +static int32_t udfdRun(); +static void udfdConnectMnodeThreadFunc(void* args); - if (pMsg->code != TSDB_CODE_SUCCESS) { - fnError("udfd rpc error. code: %s", tstrerror(pMsg->code)); - msgInfo->code = pMsg->code; - goto _return; - } +void udfdProcessRequest(uv_work_t *req) { + SUvUdfWork *uvUdf = (SUvUdfWork *)(req->data); + SUdfRequest request = {0}; + decodeUdfRequest(uvUdf->input.base, &request); - if (msgInfo->rpcType == UDFD_RPC_MNODE_CONNECT) { - SConnectRsp connectRsp = {0}; - tDeserializeSConnectRsp(pMsg->pCont, pMsg->contLen, &connectRsp); - if (connectRsp.epSet.numOfEps == 0) { - msgInfo->code = TSDB_CODE_MND_APP_ERROR; - goto _return; + switch (request.type) { + case UDF_TASK_SETUP: { + udfdProcessSetupRequest(uvUdf, &request); + break; } - if (connectRsp.dnodeNum > 1 && !isEpsetEqual(&global.mgmtEp.epSet, &connectRsp.epSet)) { - updateEpSet_s(&global.mgmtEp, &connectRsp.epSet); + case UDF_TASK_CALL: { + udfdProcessCallRequest(uvUdf, &request); + break; + } + case UDF_TASK_TEARDOWN: { + udfdProcessTeardownRequest(uvUdf, &request); + break; + } + default: { + break; } - msgInfo->code = 0; - } else if (msgInfo->rpcType == UDFD_RPC_RETRIVE_FUNC) { - SRetrieveFuncRsp retrieveRsp = {0}; - tDeserializeSRetrieveFuncRsp(pMsg->pCont, pMsg->contLen, &retrieveRsp); - - SFuncInfo *pFuncInfo = (SFuncInfo *)taosArrayGet(retrieveRsp.pFuncInfos, 0); - SUdf * udf = msgInfo->param; - udf->funcType = pFuncInfo->funcType; - udf->scriptType = pFuncInfo->scriptType; - udf->outputType = pFuncInfo->outputType; - udf->outputLen = pFuncInfo->outputLen; - udf->bufSize = pFuncInfo->bufSize; - - char path[PATH_MAX] = {0}; - snprintf(path, sizeof(path), "%s/lib%s.so", "/tmp", pFuncInfo->name); - TdFilePtr file = - taosOpenFile(path, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_READ | TD_FILE_TRUNC | TD_FILE_AUTO_DEL); - // TODO check for failure of flush to disk - taosWriteFile(file, pFuncInfo->pCode, pFuncInfo->codeSize); - taosCloseFile(&file); - strncpy(udf->path, path, strlen(path)); - tFreeSFuncInfo(pFuncInfo); - taosArrayDestroy(retrieveRsp.pFuncInfos); - msgInfo->code = 0; - } - -_return: - rpcFreeCont(pMsg->pCont); - uv_sem_post(&msgInfo->resultSem); - return; -} - -int32_t udfdFillUdfInfoFromMNode(void *clientRpc, char *udfName, SUdf *udf) { - SRetrieveFuncReq retrieveReq = {0}; - retrieveReq.numOfFuncs = 1; - retrieveReq.pFuncNames = taosArrayInit(1, TSDB_FUNC_NAME_LEN); - taosArrayPush(retrieveReq.pFuncNames, udfName); - - int32_t contLen = tSerializeSRetrieveFuncReq(NULL, 0, &retrieveReq); - void * pReq = rpcMallocCont(contLen); - tSerializeSRetrieveFuncReq(pReq, contLen, &retrieveReq); - taosArrayDestroy(retrieveReq.pFuncNames); - - SUdfdRpcSendRecvInfo *msgInfo = taosMemoryCalloc(1, sizeof(SUdfdRpcSendRecvInfo)); - msgInfo->rpcType = UDFD_RPC_RETRIVE_FUNC; - msgInfo->param = udf; - uv_sem_init(&msgInfo->resultSem, 0); - - SRpcMsg rpcMsg = {0}; - rpcMsg.pCont = pReq; - rpcMsg.contLen = contLen; - rpcMsg.msgType = TDMT_MND_RETRIEVE_FUNC; - rpcMsg.info.ahandle = msgInfo; - rpcSendRequest(clientRpc, &global.mgmtEp.epSet, &rpcMsg, NULL); - - uv_sem_wait(&msgInfo->resultSem); - uv_sem_destroy(&msgInfo->resultSem); - int32_t code = msgInfo->code; - 
taosMemoryFree(msgInfo); - return code; -} - -int32_t udfdConnectToMnode() { - SConnectReq connReq = {0}; - connReq.connType = CONN_TYPE__UDFD; - tstrncpy(connReq.app, "udfd", sizeof(connReq.app)); - tstrncpy(connReq.user, TSDB_DEFAULT_USER, sizeof(connReq.user)); - char pass[TSDB_PASSWORD_LEN + 1] = {0}; - taosEncryptPass_c((uint8_t *)(TSDB_DEFAULT_PASS), strlen(TSDB_DEFAULT_PASS), pass); - tstrncpy(connReq.passwd, pass, sizeof(connReq.passwd)); - connReq.pid = htonl(taosGetPId()); - connReq.startTime = htobe64(taosGetTimestampMs()); - - int32_t contLen = tSerializeSConnectReq(NULL, 0, &connReq); - void * pReq = rpcMallocCont(contLen); - tSerializeSConnectReq(pReq, contLen, &connReq); - - SUdfdRpcSendRecvInfo *msgInfo = taosMemoryCalloc(1, sizeof(SUdfdRpcSendRecvInfo)); - msgInfo->rpcType = UDFD_RPC_MNODE_CONNECT; - uv_sem_init(&msgInfo->resultSem, 0); - - SRpcMsg rpcMsg = {0}; - rpcMsg.msgType = TDMT_MND_CONNECT; - rpcMsg.pCont = pReq; - rpcMsg.contLen = contLen; - rpcMsg.info.ahandle = msgInfo; - rpcSendRequest(global.clientRpc, &global.mgmtEp.epSet, &rpcMsg, NULL); - - uv_sem_wait(&msgInfo->resultSem); - int32_t code = msgInfo->code; - uv_sem_destroy(&msgInfo->resultSem); - taosMemoryFree(msgInfo); - return code; -} - -int32_t udfdLoadUdf(char *udfName, SUdf *udf) { - strcpy(udf->name, udfName); - int32_t err = 0; - - err = udfdFillUdfInfoFromMNode(global.clientRpc, udf->name, udf); - if (err != 0) { - fnError("can not retrieve udf from mnode. udf name %s", udfName); - return TSDB_CODE_UDF_LOAD_UDF_FAILURE; - } - - err = uv_dlopen(udf->path, &udf->lib); - if (err != 0) { - fnError("can not load library %s. error: %s", udf->path, uv_strerror(err)); - return TSDB_CODE_UDF_LOAD_UDF_FAILURE; - } - - char initFuncName[TSDB_FUNC_NAME_LEN + 5] = {0}; - char *initSuffix = "_init"; - strcpy(initFuncName, udfName); - strncat(initFuncName, initSuffix, strlen(initSuffix)); - uv_dlsym(&udf->lib, initFuncName, (void **)(&udf->initFunc)); - - char destroyFuncName[TSDB_FUNC_NAME_LEN + 5] = {0}; - char *destroySuffix = "_destroy"; - strcpy(destroyFuncName, udfName); - strncat(destroyFuncName, destroySuffix, strlen(destroySuffix)); - uv_dlsym(&udf->lib, destroyFuncName, (void **)(&udf->destroyFunc)); - - if (udf->funcType == TSDB_FUNC_TYPE_SCALAR) { - char processFuncName[TSDB_FUNC_NAME_LEN] = {0}; - strcpy(processFuncName, udfName); - uv_dlsym(&udf->lib, processFuncName, (void **)(&udf->scalarProcFunc)); - } else if (udf->funcType == TSDB_FUNC_TYPE_AGGREGATE) { - char processFuncName[TSDB_FUNC_NAME_LEN] = {0}; - strcpy(processFuncName, udfName); - uv_dlsym(&udf->lib, processFuncName, (void **)(&udf->aggProcFunc)); - char startFuncName[TSDB_FUNC_NAME_LEN + 6] = {0}; - char *startSuffix = "_start"; - strncpy(startFuncName, processFuncName, strlen(processFuncName)); - strncat(startFuncName, startSuffix, strlen(startSuffix)); - uv_dlsym(&udf->lib, startFuncName, (void **)(&udf->aggStartFunc)); - char finishFuncName[TSDB_FUNC_NAME_LEN + 7] = {0}; - char *finishSuffix = "_finish"; - strncpy(finishFuncName, processFuncName, strlen(processFuncName)); - strncat(finishFuncName, finishSuffix, strlen(finishSuffix)); - uv_dlsym(&udf->lib, finishFuncName, (void **)(&udf->aggFinishFunc)); - // TODO: merge } - return 0; } void udfdProcessSetupRequest(SUvUdfWork *uvUdf, SUdfRequest *request) { @@ -424,76 +313,300 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) { default: break; } - - taosMemoryFree(uvUdf->input.base); - return; + + taosMemoryFree(uvUdf->input.base); + return; +} + +void 
udfdProcessTeardownRequest(SUvUdfWork *uvUdf, SUdfRequest *request) { + SUdfTeardownRequest *teardown = &request->teardown; + fnInfo("teardown. seq number: %" PRId64 ", handle:%" PRIx64, request->seqNum, teardown->udfHandle); + SUdfcFuncHandle *handle = (SUdfcFuncHandle *)(teardown->udfHandle); + SUdf * udf = handle->udf; + bool unloadUdf = false; + int32_t code = TSDB_CODE_SUCCESS; + + uv_mutex_lock(&global.udfsMutex); + udf->refCount--; + if (udf->refCount == 0) { + unloadUdf = true; + taosHashRemove(global.udfsHash, udf->name, strlen(udf->name)); + } + uv_mutex_unlock(&global.udfsMutex); + if (unloadUdf) { + uv_cond_destroy(&udf->condReady); + uv_mutex_destroy(&udf->lock); + if (udf->destroyFunc) { + (udf->destroyFunc)(); + } + uv_dlclose(&udf->lib); + taosMemoryFree(udf); + } + taosMemoryFree(handle); + + SUdfResponse response; + SUdfResponse *rsp = &response; + rsp->seqNum = request->seqNum; + rsp->type = request->type; + rsp->code = code; + int32_t len = encodeUdfResponse(NULL, rsp); + rsp->msgLen = len; + void *bufBegin = taosMemoryMalloc(len); + void *buf = bufBegin; + encodeUdfResponse(&buf, rsp); + uvUdf->output = uv_buf_init(bufBegin, len); + + taosMemoryFree(uvUdf->input.base); + return; +} + +void udfdProcessRpcRsp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) { + SUdfdRpcSendRecvInfo *msgInfo = (SUdfdRpcSendRecvInfo *)pMsg->info.ahandle; + ASSERT(pMsg->info.ahandle != NULL); + + if (pEpSet) { + if (!isEpsetEqual(&global.mgmtEp.epSet, pEpSet)) { + updateEpSet_s(&global.mgmtEp, pEpSet); + } + } + + if (pMsg->code != TSDB_CODE_SUCCESS) { + fnError("udfd rpc error. code: %s", tstrerror(pMsg->code)); + msgInfo->code = pMsg->code; + goto _return; + } + + if (msgInfo->rpcType == UDFD_RPC_MNODE_CONNECT) { + SConnectRsp connectRsp = {0}; + tDeserializeSConnectRsp(pMsg->pCont, pMsg->contLen, &connectRsp); + if (connectRsp.epSet.numOfEps == 0) { + msgInfo->code = TSDB_CODE_MND_APP_ERROR; + goto _return; + } + + if (connectRsp.dnodeNum > 1 && !isEpsetEqual(&global.mgmtEp.epSet, &connectRsp.epSet)) { + updateEpSet_s(&global.mgmtEp, &connectRsp.epSet); + } + msgInfo->code = 0; + } else if (msgInfo->rpcType == UDFD_RPC_RETRIVE_FUNC) { + SRetrieveFuncRsp retrieveRsp = {0}; + tDeserializeSRetrieveFuncRsp(pMsg->pCont, pMsg->contLen, &retrieveRsp); + + SFuncInfo *pFuncInfo = (SFuncInfo *)taosArrayGet(retrieveRsp.pFuncInfos, 0); + SUdf * udf = msgInfo->param; + udf->funcType = pFuncInfo->funcType; + udf->scriptType = pFuncInfo->scriptType; + udf->outputType = pFuncInfo->outputType; + udf->outputLen = pFuncInfo->outputLen; + udf->bufSize = pFuncInfo->bufSize; + + char path[PATH_MAX] = {0}; + snprintf(path, sizeof(path), "%s/lib%s.so", TD_TMP_DIR_PATH, pFuncInfo->name); + TdFilePtr file = + taosOpenFile(path, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_READ | TD_FILE_TRUNC | TD_FILE_AUTO_DEL); + int64_t count = taosWriteFile(file, pFuncInfo->pCode, pFuncInfo->codeSize); + if (count != pFuncInfo->codeSize) { + fnError("udfd write udf shared library failed"); + msgInfo->code = TSDB_CODE_FILE_CORRUPTED; + } + taosCloseFile(&file); + strncpy(udf->path, path, strlen(path)); + tFreeSFuncInfo(pFuncInfo); + taosArrayDestroy(retrieveRsp.pFuncInfos); + msgInfo->code = 0; + } + +_return: + rpcFreeCont(pMsg->pCont); + uv_sem_post(&msgInfo->resultSem); + return; +} + +int32_t udfdFillUdfInfoFromMNode(void *clientRpc, char *udfName, SUdf *udf) { + SRetrieveFuncReq retrieveReq = {0}; + retrieveReq.numOfFuncs = 1; + retrieveReq.pFuncNames = taosArrayInit(1, TSDB_FUNC_NAME_LEN); + taosArrayPush(retrieveReq.pFuncNames, 
udfName); + + int32_t contLen = tSerializeSRetrieveFuncReq(NULL, 0, &retrieveReq); + void * pReq = rpcMallocCont(contLen); + tSerializeSRetrieveFuncReq(pReq, contLen, &retrieveReq); + taosArrayDestroy(retrieveReq.pFuncNames); + + SUdfdRpcSendRecvInfo *msgInfo = taosMemoryCalloc(1, sizeof(SUdfdRpcSendRecvInfo)); + msgInfo->rpcType = UDFD_RPC_RETRIVE_FUNC; + msgInfo->param = udf; + uv_sem_init(&msgInfo->resultSem, 0); + + SRpcMsg rpcMsg = {0}; + rpcMsg.pCont = pReq; + rpcMsg.contLen = contLen; + rpcMsg.msgType = TDMT_MND_RETRIEVE_FUNC; + rpcMsg.info.ahandle = msgInfo; + rpcSendRequest(clientRpc, &global.mgmtEp.epSet, &rpcMsg, NULL); + + uv_sem_wait(&msgInfo->resultSem); + uv_sem_destroy(&msgInfo->resultSem); + int32_t code = msgInfo->code; + taosMemoryFree(msgInfo); + return code; +} + +int32_t udfdConnectToMnode() { + SConnectReq connReq = {0}; + connReq.connType = CONN_TYPE__UDFD; + tstrncpy(connReq.app, "udfd", sizeof(connReq.app)); + tstrncpy(connReq.user, TSDB_DEFAULT_USER, sizeof(connReq.user)); + char pass[TSDB_PASSWORD_LEN + 1] = {0}; + taosEncryptPass_c((uint8_t *)(TSDB_DEFAULT_PASS), strlen(TSDB_DEFAULT_PASS), pass); + tstrncpy(connReq.passwd, pass, sizeof(connReq.passwd)); + connReq.pid = htonl(taosGetPId()); + connReq.startTime = htobe64(taosGetTimestampMs()); + + int32_t contLen = tSerializeSConnectReq(NULL, 0, &connReq); + void * pReq = rpcMallocCont(contLen); + tSerializeSConnectReq(pReq, contLen, &connReq); + + SUdfdRpcSendRecvInfo *msgInfo = taosMemoryCalloc(1, sizeof(SUdfdRpcSendRecvInfo)); + msgInfo->rpcType = UDFD_RPC_MNODE_CONNECT; + uv_sem_init(&msgInfo->resultSem, 0); + + SRpcMsg rpcMsg = {0}; + rpcMsg.msgType = TDMT_MND_CONNECT; + rpcMsg.pCont = pReq; + rpcMsg.contLen = contLen; + rpcMsg.info.ahandle = msgInfo; + rpcSendRequest(global.clientRpc, &global.mgmtEp.epSet, &rpcMsg, NULL); + + uv_sem_wait(&msgInfo->resultSem); + int32_t code = msgInfo->code; + uv_sem_destroy(&msgInfo->resultSem); + taosMemoryFree(msgInfo); + return code; +} + +int32_t udfdLoadUdf(char *udfName, SUdf *udf) { + strcpy(udf->name, udfName); + int32_t err = 0; + + err = udfdFillUdfInfoFromMNode(global.clientRpc, udf->name, udf); + if (err != 0) { + fnError("can not retrieve udf from mnode. udf name %s", udfName); + return TSDB_CODE_UDF_LOAD_UDF_FAILURE; + } + + err = uv_dlopen(udf->path, &udf->lib); + if (err != 0) { + fnError("can not load library %s. 
error: %s", udf->path, uv_strerror(err)); + return TSDB_CODE_UDF_LOAD_UDF_FAILURE; + } + + char initFuncName[TSDB_FUNC_NAME_LEN + 5] = {0}; + char *initSuffix = "_init"; + strcpy(initFuncName, udfName); + strncat(initFuncName, initSuffix, strlen(initSuffix)); + uv_dlsym(&udf->lib, initFuncName, (void **)(&udf->initFunc)); + + char destroyFuncName[TSDB_FUNC_NAME_LEN + 5] = {0}; + char *destroySuffix = "_destroy"; + strcpy(destroyFuncName, udfName); + strncat(destroyFuncName, destroySuffix, strlen(destroySuffix)); + uv_dlsym(&udf->lib, destroyFuncName, (void **)(&udf->destroyFunc)); + + if (udf->funcType == TSDB_FUNC_TYPE_SCALAR) { + char processFuncName[TSDB_FUNC_NAME_LEN] = {0}; + strcpy(processFuncName, udfName); + uv_dlsym(&udf->lib, processFuncName, (void **)(&udf->scalarProcFunc)); + } else if (udf->funcType == TSDB_FUNC_TYPE_AGGREGATE) { + char processFuncName[TSDB_FUNC_NAME_LEN] = {0}; + strcpy(processFuncName, udfName); + uv_dlsym(&udf->lib, processFuncName, (void **)(&udf->aggProcFunc)); + char startFuncName[TSDB_FUNC_NAME_LEN + 6] = {0}; + char *startSuffix = "_start"; + strncpy(startFuncName, processFuncName, strlen(processFuncName)); + strncat(startFuncName, startSuffix, strlen(startSuffix)); + uv_dlsym(&udf->lib, startFuncName, (void **)(&udf->aggStartFunc)); + char finishFuncName[TSDB_FUNC_NAME_LEN + 7] = {0}; + char *finishSuffix = "_finish"; + strncpy(finishFuncName, processFuncName, strlen(processFuncName)); + strncat(finishFuncName, finishSuffix, strlen(finishSuffix)); + uv_dlsym(&udf->lib, finishFuncName, (void **)(&udf->aggFinishFunc)); + // TODO: merge + } + return 0; +} +static bool udfdRpcRfp(int32_t code) { + if (code == TSDB_CODE_RPC_REDIRECT) { + return true; + } else { + return false; + } } -void udfdProcessTeardownRequest(SUvUdfWork *uvUdf, SUdfRequest *request) { - SUdfTeardownRequest *teardown = &request->teardown; - fnInfo("teardown. 
seq number: %" PRId64 ", handle:%" PRIx64, request->seqNum, teardown->udfHandle); - SUdfcFuncHandle *handle = (SUdfcFuncHandle *)(teardown->udfHandle); - SUdf * udf = handle->udf; - bool unloadUdf = false; - int32_t code = TSDB_CODE_SUCCESS; +int initEpSetFromCfg(const char *firstEp, const char *secondEp, SCorEpSet *pEpSet) { + pEpSet->version = 0; - uv_mutex_lock(&global.udfsMutex); - udf->refCount--; - if (udf->refCount == 0) { - unloadUdf = true; - taosHashRemove(global.udfsHash, udf->name, strlen(udf->name)); + // init mnode ip set + SEpSet *mgmtEpSet = &(pEpSet->epSet); + mgmtEpSet->numOfEps = 0; + mgmtEpSet->inUse = 0; + + if (firstEp && firstEp[0] != 0) { + if (strlen(firstEp) >= TSDB_EP_LEN) { + terrno = TSDB_CODE_TSC_INVALID_FQDN; + return -1; + } + + int32_t code = taosGetFqdnPortFromEp(firstEp, &mgmtEpSet->eps[0]); + if (code != TSDB_CODE_SUCCESS) { + terrno = TSDB_CODE_TSC_INVALID_FQDN; + return terrno; + } + + mgmtEpSet->numOfEps++; } - uv_mutex_unlock(&global.udfsMutex); - if (unloadUdf) { - uv_cond_destroy(&udf->condReady); - uv_mutex_destroy(&udf->lock); - if (udf->destroyFunc) { - (udf->destroyFunc)(); + + if (secondEp && secondEp[0] != 0) { + if (strlen(secondEp) >= TSDB_EP_LEN) { + terrno = TSDB_CODE_TSC_INVALID_FQDN; + return -1; } - uv_dlclose(&udf->lib); - taosMemoryFree(udf); + + taosGetFqdnPortFromEp(secondEp, &mgmtEpSet->eps[mgmtEpSet->numOfEps]); + mgmtEpSet->numOfEps++; } - taosMemoryFree(handle); - SUdfResponse response; - SUdfResponse *rsp = &response; - rsp->seqNum = request->seqNum; - rsp->type = request->type; - rsp->code = code; - int32_t len = encodeUdfResponse(NULL, rsp); - rsp->msgLen = len; - void *bufBegin = taosMemoryMalloc(len); - void *buf = bufBegin; - encodeUdfResponse(&buf, rsp); - uvUdf->output = uv_buf_init(bufBegin, len); + if (mgmtEpSet->numOfEps == 0) { + terrno = TSDB_CODE_TSC_INVALID_FQDN; + return -1; + } - taosMemoryFree(uvUdf->input.base); - return; + return 0; } -void udfdProcessRequest(uv_work_t *req) { - SUvUdfWork *uvUdf = (SUvUdfWork *)(req->data); - SUdfRequest request = {0}; - decodeUdfRequest(uvUdf->input.base, &request); - - switch (request.type) { - case UDF_TASK_SETUP: { - udfdProcessSetupRequest(uvUdf, &request); - break; - } +int32_t udfdOpenClientRpc() { + SRpcInit rpcInit = {0}; + rpcInit.label = "UDFD"; + rpcInit.numOfThreads = 1; + rpcInit.cfp = (RpcCfp)udfdProcessRpcRsp; + rpcInit.sessions = 1024; + rpcInit.connType = TAOS_CONN_CLIENT; + rpcInit.idleTime = tsShellActivityTimer * 1000; + rpcInit.user = TSDB_DEFAULT_USER; + rpcInit.parent = &global; + rpcInit.rfp = udfdRpcRfp; - case UDF_TASK_CALL: { - udfdProcessCallRequest(uvUdf, &request); - break; - } - case UDF_TASK_TEARDOWN: { - udfdProcessTeardownRequest(uvUdf, &request); - break; - } - default: { - break; - } + global.clientRpc = rpcOpen(&rpcInit); + if (global.clientRpc == NULL) { + fnError("failed to init dnode rpc client"); + return -1; } + return 0; +} + +int32_t udfdCloseClientRpc() { + rpcClose(global.clientRpc); + return 0; } void udfdOnWrite(uv_write_t *req, int status) { @@ -529,7 +642,7 @@ void udfdAllocBuffer(uv_handle_t *handle, size_t suggestedSize, uv_buf_t *buf) { buf->base = ctx->inputBuf; buf->len = ctx->inputCap; } else { - // TODO: log error + fnError("udfd can not allocate enough memory") buf->base = NULL; buf->len = 0; } @@ -541,7 +654,7 @@ void udfdAllocBuffer(uv_handle_t *handle, size_t suggestedSize, uv_buf_t *buf) { buf->base = ctx->inputBuf + ctx->inputLen; buf->len = ctx->inputCap - ctx->inputLen; } else { - // TODO: log error + 
fnError("udfd can not allocate enough memory") buf->base = NULL; buf->len = 0; } @@ -580,8 +693,6 @@ void udfdPipeCloseCb(uv_handle_t *pipe) { taosMemoryFree(conn); } -void udfdUvHandleError(SUdfdUvConn *conn) { uv_close((uv_handle_t *)conn->client, udfdPipeCloseCb); } - void udfdPipeRead(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) { fnDebug("udf read %zd bytes from client", nread); if (nread == 0) return; @@ -599,7 +710,7 @@ void udfdPipeRead(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) { } if (nread < 0) { - fnDebug("Receive error %s", uv_err_name(nread)); + fnError("Receive error %s", uv_err_name(nread)); if (nread == UV_EOF) { // TODO check more when close } else { @@ -638,91 +749,6 @@ void udfdIntrSignalHandler(uv_signal_t *handle, int signum) { uv_stop(global.loop); } -static bool udfdRpcRfp(int32_t code) { - if (code == TSDB_CODE_RPC_REDIRECT) { - return true; - } else { - return false; - } -} - -int initEpSetFromCfg(const char *firstEp, const char *secondEp, SCorEpSet *pEpSet) { - pEpSet->version = 0; - - // init mnode ip set - SEpSet *mgmtEpSet = &(pEpSet->epSet); - mgmtEpSet->numOfEps = 0; - mgmtEpSet->inUse = 0; - - if (firstEp && firstEp[0] != 0) { - if (strlen(firstEp) >= TSDB_EP_LEN) { - terrno = TSDB_CODE_TSC_INVALID_FQDN; - return -1; - } - - int32_t code = taosGetFqdnPortFromEp(firstEp, &mgmtEpSet->eps[0]); - if (code != TSDB_CODE_SUCCESS) { - terrno = TSDB_CODE_TSC_INVALID_FQDN; - return terrno; - } - - mgmtEpSet->numOfEps++; - } - - if (secondEp && secondEp[0] != 0) { - if (strlen(secondEp) >= TSDB_EP_LEN) { - terrno = TSDB_CODE_TSC_INVALID_FQDN; - return -1; - } - - taosGetFqdnPortFromEp(secondEp, &mgmtEpSet->eps[mgmtEpSet->numOfEps]); - mgmtEpSet->numOfEps++; - } - - if (mgmtEpSet->numOfEps == 0) { - terrno = TSDB_CODE_TSC_INVALID_FQDN; - return -1; - } - - return 0; -} - -int32_t udfdOpenClientRpc() { - SRpcInit rpcInit = {0}; - rpcInit.label = "UDFD"; - rpcInit.numOfThreads = 1; - rpcInit.cfp = (RpcCfp)udfdProcessRpcRsp; - rpcInit.sessions = 1024; - rpcInit.connType = TAOS_CONN_CLIENT; - rpcInit.idleTime = tsShellActivityTimer * 1000; - rpcInit.user = TSDB_DEFAULT_USER; - rpcInit.parent = &global; - rpcInit.rfp = udfdRpcRfp; - - global.clientRpc = rpcOpen(&rpcInit); - if (global.clientRpc == NULL) { - fnError("failed to init dnode rpc client"); - return -1; - } - return 0; -} - -int32_t udfdCloseClientRpc() { - rpcClose(global.clientRpc); - return 0; -} - -static void udfdPrintVersion() { -#ifdef TD_ENTERPRISE - char *releaseName = "enterprise"; -#else - char *releaseName = "community"; -#endif - printf("%s version: %s compatible_version: %s\n", releaseName, version, compatible_version); - printf("gitinfo: %s\n", gitinfo); - printf("buildInfo: %s\n", buildinfo); -} - static int32_t udfdParseArgs(int32_t argc, char *argv[]) { for (int32_t i = 1; i < argc; ++i) { if (strcmp(argv[i], "-c") == 0) { @@ -745,6 +771,17 @@ static int32_t udfdParseArgs(int32_t argc, char *argv[]) { return 0; } +static void udfdPrintVersion() { +#ifdef TD_ENTERPRISE + char *releaseName = "enterprise"; +#else + char *releaseName = "community"; +#endif + printf("%s version: %s compatible_version: %s\n", releaseName, version, compatible_version); + printf("gitinfo: %s\n", gitinfo); + printf("buildInfo: %s\n", buildinfo); +} + static int32_t udfdInitLog() { char logName[12] = {0}; snprintf(logName, sizeof(logName), "%slog", "udfd"); @@ -834,6 +871,23 @@ static int32_t udfdRun() { return 0; } +void udfdConnectMnodeThreadFunc(void* args) { + int32_t retryMnodeTimes = 0; + 
int32_t code = 0; + while (retryMnodeTimes++ <= TSDB_MAX_REPLICA) { + uv_sleep(100 * (1 << retryMnodeTimes)); + code = udfdConnectToMnode(); + if (code == 0) { + break; + } + fnError("udfd can not connect to mnode, code: %s. retry", tstrerror(code)); + } + + if (code != 0) { + fnError("udfd can not connect to mnode"); + } +} + int main(int argc, char *argv[]) { if (!taosCheckSystemIsSmallEnd()) { printf("failed to start since on non-small-end machines\n"); @@ -866,30 +920,19 @@ int main(int argc, char *argv[]) { return -3; } - int32_t retryMnodeTimes = 0; - int32_t code = 0; - while (retryMnodeTimes++ < TSDB_MAX_REPLICA) { - uv_sleep(500 * (1 << retryMnodeTimes)); - code = udfdConnectToMnode(); - if (code == 0) { - break; - } - fnError("can not connect to mnode, code: %s. retry", tstrerror(code)); - } - - if (code != 0) { - fnError("failed to start since can not connect to mnode"); - return -4; - } - if (udfdUvInit() != 0) { fnError("uv init failure"); return -5; } + uv_thread_t mnodeConnectThread; + uv_thread_create(&mnodeConnectThread, udfdConnectMnodeThreadFunc, NULL); + udfdRun(); removeListeningPipe(); - + uv_thread_join(&mnodeConnectThread); udfdCloseClientRpc(); + + return 0; } diff --git a/source/libs/index/CMakeLists.txt b/source/libs/index/CMakeLists.txt index 7dc66e47898eed6e58288d775a0d4c78f79dc4b0..e55b004972d841a2049dc0474dbf3343b1cc300a 100644 --- a/source/libs/index/CMakeLists.txt +++ b/source/libs/index/CMakeLists.txt @@ -12,6 +12,9 @@ target_link_libraries( PUBLIC os PUBLIC util PUBLIC common + PUBLIC nodes + PUBLIC scalar + PUBLIC function ) if (${BUILD_WITH_LUCENE}) diff --git a/source/libs/index/inc/indexCache.h b/source/libs/index/inc/indexCache.h index d474d874090b09a4a9952e25a8a8f84eca77bf10..1046a04db34062367fb84bef2c6b292da6b147d5 100644 --- a/source/libs/index/inc/indexCache.h +++ b/source/libs/index/inc/indexCache.h @@ -38,7 +38,7 @@ typedef struct IndexCache { MemTable *mem, *imm; SIndex* index; char* colName; - int32_t version; + int64_t version; int64_t occupiedMem; int8_t type; uint64_t suid; @@ -47,12 +47,12 @@ typedef struct IndexCache { TdThreadCond finished; } IndexCache; -#define CACHE_VERSION(cache) atomic_load_32(&cache->version) +#define CACHE_VERSION(cache) atomic_load_64(&cache->version) typedef struct CacheTerm { // key char* colVal; - int32_t version; + int64_t version; // value uint64_t uid; int8_t colType; @@ -63,7 +63,10 @@ typedef struct CacheTerm { IndexCache* indexCacheCreate(SIndex* idx, uint64_t suid, const char* colName, int8_t type); +void indexCacheForceToMerge(void* cache); void indexCacheDestroy(void* cache); +void indexCacheBroadcast(void* cache); +void indexCacheWait(void* cache); Iterate* indexCacheIteratorCreate(IndexCache* cache); void indexCacheIteratorDestroy(Iterate* iiter); @@ -71,7 +74,7 @@ void indexCacheIteratorDestroy(Iterate* iiter); int indexCachePut(void* cache, SIndexTerm* term, uint64_t uid); // int indexCacheGet(void *cache, uint64_t *rst); -int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTempResult* tr, STermValueType* s); +int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTRslt* tr, STermValueType* s); void indexCacheRef(IndexCache* cache); void indexCacheUnRef(IndexCache* cache); diff --git a/source/libs/index/inc/indexInt.h b/source/libs/index/inc/indexInt.h index 27c380beafb53065f7bfd4e5955be234406a58f6..81d43daf133ab0613b3cc56ec68d82e87bc0326c 100644 --- a/source/libs/index/inc/indexInt.h +++ b/source/libs/index/inc/indexInt.h @@ -34,6 +34,15 @@ extern "C" { #endif +// clang-format 
off +#define indexFatal(...) do { if (idxDebugFlag & DEBUG_FATAL) { taosPrintLog("INDEX FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while (0) +#define indexError(...) do { if (idxDebugFlag & DEBUG_ERROR) { taosPrintLog("INDEX ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while (0) +#define indexWarn(...) do { if (idxDebugFlag & DEBUG_WARN) { taosPrintLog("INDEX WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while (0) +#define indexInfo(...) do { if (idxDebugFlag & DEBUG_INFO) { taosPrintLog("INDEX ", DEBUG_INFO, 255, __VA_ARGS__); } } while (0) +#define indexDebug(...) do { if (idxDebugFlag & DEBUG_DEBUG) { taosPrintLog("INDEX ", DEBUG_DEBUG, sDebugFlag, __VA_ARGS__);} } while (0) +#define indexTrace(...) do { if (idxDebugFlag & DEBUG_TRACE) { taosPrintLog("INDEX ", DEBUG_TRACE, sDebugFlag, __VA_ARGS__);} } while (0) +// clang-format on + typedef enum { LT, LE, GT, GE } RangeType; typedef enum { kTypeValue, kTypeDeletion } STermValueType; @@ -58,6 +67,8 @@ struct SIndex { SIndexStat stat; TdThreadMutex mtx; + tsem_t sem; + bool quit; }; struct SIndexOpts { @@ -69,6 +80,7 @@ struct SIndexOpts { int32_t cacheSize; // MB // add cache module later #endif + int32_t cacheOpt; // MB }; struct SIndexMultiTermQuery { @@ -131,43 +143,6 @@ int32_t indexSerialCacheKey(ICacheKey* key, char* buf); // int32_t indexSerialKey(ICacheKey* key, char* buf); // int32_t indexSerialTermKey(SIndexTerm* itm, char* buf); -#define indexFatal(...) \ - do { \ - if (sDebugFlag & DEBUG_FATAL) { \ - taosPrintLog("index FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); \ - } \ - } while (0) -#define indexError(...) \ - do { \ - if (sDebugFlag & DEBUG_ERROR) { \ - taosPrintLog("index ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); \ - } \ - } while (0) -#define indexWarn(...) \ - do { \ - if (sDebugFlag & DEBUG_WARN) { \ - taosPrintLog("index WARN ", DEBUG_WARN, 255, __VA_ARGS__); \ - } \ - } while (0) -#define indexInfo(...) \ - do { \ - if (sDebugFlag & DEBUG_INFO) { \ - taosPrintLog("index ", DEBUG_INFO, 255, __VA_ARGS__); \ - } \ - } while (0) -#define indexDebug(...) \ - do { \ - if (sDebugFlag & DEBUG_DEBUG) { \ - taosPrintLog("index ", DEBUG_DEBUG, sDebugFlag, __VA_ARGS__); \ - } \ - } while (0) -#define indexTrace(...) 
\ - do { \ - if (sDebugFlag & DEBUG_TRACE) { \ - taosPrintLog("index ", DEBUG_TRACE, sDebugFlag, __VA_ARGS__); \ - } \ - } while (0) - #define INDEX_TYPE_CONTAIN_EXTERN_TYPE(ty, exTy) (((ty >> 4) & (exTy)) != 0) #define INDEX_TYPE_GET_TYPE(ty) (ty & 0x0F) diff --git a/source/libs/index/inc/indexTfile.h b/source/libs/index/inc/indexTfile.h index 9712e4b30f7ecfdabb4e8d3c824190f60319ec69..ca55aa93da5a47bcefa26bf880d115abeb46b8c8 100644 --- a/source/libs/index/inc/indexTfile.h +++ b/source/libs/index/inc/indexTfile.h @@ -28,19 +28,19 @@ extern "C" { // tfile header content // |<---suid--->|<---version--->|<-------colName------>|<---type-->|<--fstOffset->| -// |<-uint64_t->|<---int32_t--->|<--TSDB_COL_NAME_LEN-->|<-uint8_t->|<---int32_t-->| +// |<-uint64_t->|<---int64_t--->|<--TSDB_COL_NAME_LEN-->|<-uint8_t->|<---int32_t-->| #pragma pack(push, 1) typedef struct TFileHeader { uint64_t suid; - int32_t version; + int64_t version; char colName[TSDB_COL_NAME_LEN]; // uint8_t colType; int32_t fstOffset; } TFileHeader; #pragma pack(pop) -#define TFILE_HEADER_SIZE (sizeof(TFileHeader)) +#define TFILE_HEADER_SIZE (sizeof(TFileHeader)) #define TFILE_HEADER_NO_FST (TFILE_HEADER_SIZE - sizeof(int32_t)) typedef struct TFileValue { @@ -74,9 +74,10 @@ typedef struct TFileReader { } TFileReader; typedef struct IndexTFile { - char* path; - TFileCache* cache; - TFileWriter* tw; + char* path; + TFileCache* cache; + TFileWriter* tw; + TdThreadMutex mtx; } IndexTFile; typedef struct TFileWriterOpt { @@ -101,14 +102,14 @@ void tfileCachePut(TFileCache* tcache, ICacheKey* key, TFileReader* read TFileReader* tfileGetReaderByCol(IndexTFile* tf, uint64_t suid, char* colName); -TFileReader* tfileReaderOpen(char* path, uint64_t suid, int32_t version, const char* colName); +TFileReader* tfileReaderOpen(char* path, uint64_t suid, int64_t version, const char* colName); TFileReader* tfileReaderCreate(WriterCtx* ctx); void tfileReaderDestroy(TFileReader* reader); -int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTempResult* tr); +int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTRslt* tr); void tfileReaderRef(TFileReader* reader); void tfileReaderUnRef(TFileReader* reader); -TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int32_t version, const char* colName, uint8_t type); +TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int64_t version, const char* colName, uint8_t type); void tfileWriterClose(TFileWriter* tw); TFileWriter* tfileWriterCreate(WriterCtx* ctx, TFileHeader* header); void tfileWriterDestroy(TFileWriter* tw); @@ -119,7 +120,7 @@ int tfileWriterFinish(TFileWriter* tw); IndexTFile* indexTFileCreate(const char* path); void indexTFileDestroy(IndexTFile* tfile); int indexTFilePut(void* tfile, SIndexTerm* term, uint64_t uid); -int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTempResult* tr); +int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTRslt* tr); Iterate* tfileIteratorCreate(TFileReader* reader); void tfileIteratorDestroy(Iterate* iterator); diff --git a/source/libs/index/inc/indexUtil.h b/source/libs/index/inc/indexUtil.h index f1676ed411a5e2074667816d1746dc607dc0f44d..dbaecaa9630b04b8b50f108c1a59e499f04899dc 100644 --- a/source/libs/index/inc/indexUtil.h +++ b/source/libs/index/inc/indexUtil.h @@ -66,7 +66,7 @@ extern "C" { * [1, 4, 5] * output:[4, 5] */ -void iIntersection(SArray *interResults, SArray *finalResult); +void iIntersection(SArray *in, SArray *out); /* multi sorted result union * input: [1, 2, 4, 5] @@ -74,7 +74,7 
@@ void iIntersection(SArray *interResults, SArray *finalResult); * [1, 4, 5] * output:[1, 2, 3, 4, 5] */ -void iUnion(SArray *interResults, SArray *finalResult); +void iUnion(SArray *in, SArray *out); /* see example * total: [1, 2, 4, 5, 7, 8] @@ -92,19 +92,24 @@ typedef struct { uint64_t data; } SIdxVerdata; +/* + * index temp result + * + */ typedef struct { SArray *total; - SArray *added; - SArray *deled; -} SIdxTempResult; + SArray *add; + SArray *del; +} SIdxTRslt; + +SIdxTRslt *idxTRsltCreate(); -SIdxTempResult *sIdxTempResultCreate(); +void idxTRsltClear(SIdxTRslt *tr); -void sIdxTempResultClear(SIdxTempResult *tr); +void idxTRsltDestroy(SIdxTRslt *tr); -void sIdxTempResultDestroy(SIdxTempResult *tr); +void idxTRsltMergeTo(SIdxTRslt *tr, SArray *out); -void sIdxTempResultMergeTo(SArray *result, SIdxTempResult *tr); #ifdef __cplusplus } #endif diff --git a/source/libs/index/src/index.c b/source/libs/index/src/index.c index 162d64c41c472e88aa019b4b5def7727850c636d..8584d95bf26bc2a586e0e5842ab8c4e5b5572bbd 100644 --- a/source/libs/index/src/index.c +++ b/source/libs/index/src/index.c @@ -29,7 +29,7 @@ #include "lucene++/Lucene_c.h" #endif -#define INDEX_NUM_OF_THREADS 4 +#define INDEX_NUM_OF_THREADS 5 #define INDEX_QUEUE_SIZE 200 #define INDEX_DATA_BOOL_NULL 0x02 @@ -85,11 +85,20 @@ static int indexMergeFinalResults(SArray* interResults, EIndexOperatorType oTyp static int indexGenTFile(SIndex* index, IndexCache* cache, SArray* batch); // merge cache and tfile by opera type -static void indexMergeCacheAndTFile(SArray* result, IterateValue* icache, IterateValue* iTfv, SIdxTempResult* helper); +static void indexMergeCacheAndTFile(SArray* result, IterateValue* icache, IterateValue* iTfv, SIdxTRslt* helper); // static int32_t indexSerialTermKey(SIndexTerm* itm, char* buf); // int32_t indexSerialKey(ICacheKey* key, char* buf); +static void indexPost(void* idx) { + SIndex* pIdx = idx; + tsem_post(&pIdx->sem); +} +static void indexWait(void* idx) { + SIndex* pIdx = idx; + tsem_wait(&pIdx->sem); +} + int indexOpen(SIndexOpts* opts, const char* path, SIndex** index) { taosThreadOnce(&isInit, indexInit); SIndex* sIdx = taosMemoryCalloc(1, sizeof(SIndex)); @@ -107,6 +116,7 @@ int indexOpen(SIndexOpts* opts, const char* path, SIndex** index) { sIdx->cVersion = 1; sIdx->path = tstrdup(path); taosThreadMutexInit(&sIdx->mtx, NULL); + tsem_init(&sIdx->sem, 0, 0); sIdx->refId = indexAddRef(sIdx); indexAcquireRef(sIdx->refId); @@ -124,22 +134,28 @@ END: void indexDestroy(void* handle) { SIndex* sIdx = handle; - void* iter = taosHashIterate(sIdx->colObj, NULL); - while (iter) { - IndexCache** pCache = iter; - if (*pCache) { - indexCacheUnRef(*pCache); - } - iter = taosHashIterate(sIdx->colObj, iter); - } - taosHashCleanup(sIdx->colObj); taosThreadMutexDestroy(&sIdx->mtx); + tsem_destroy(&sIdx->sem); indexTFileDestroy(sIdx->tindex); taosMemoryFree(sIdx->path); taosMemoryFree(sIdx); return; } void indexClose(SIndex* sIdx) { + bool ref = 0; + if (sIdx->colObj != NULL) { + void* iter = taosHashIterate(sIdx->colObj, NULL); + while (iter) { + IndexCache** pCache = iter; + indexCacheForceToMerge((void*)(*pCache)); + indexInfo("%s wait to merge", (*pCache)->colName); + indexWait((void*)(sIdx)); + iter = taosHashIterate(sIdx->colObj, iter); + indexCacheUnRef(*pCache); + } + taosHashCleanup(sIdx->colObj); + sIdx->colObj = NULL; + } indexReleaseRef(sIdx->refId); indexRemoveRef(sIdx->refId); } @@ -185,6 +201,7 @@ int indexPut(SIndex* index, SIndexMultiTerm* fVals, uint64_t uid) { char buf[128] = {0}; ICacheKey key = 
{.suid = p->suid, .colName = p->colName, .nColName = strlen(p->colName), .colType = p->colType}; int32_t sz = indexSerialCacheKey(&key, buf); + indexDebug("suid: %" PRIu64 ", colName: %s, colType: %d", key.suid, key.colName, key.colType); IndexCache** cache = taosHashGet(index->colObj, buf, sz); assert(*cache != NULL); @@ -312,6 +329,7 @@ static int indexTermSearch(SIndex* sIdx, SIndexTermQuery* query, SArray** result char buf[128] = {0}; ICacheKey key = { .suid = term->suid, .colName = term->colName, .nColName = strlen(term->colName), .colType = term->colType}; + indexDebug("suid: %" PRIu64 ", colName: %s, colType: %d", key.suid, key.colName, key.colType); int32_t sz = indexSerialCacheKey(&key, buf); taosThreadMutexLock(&sIdx->mtx); @@ -325,7 +343,7 @@ static int indexTermSearch(SIndex* sIdx, SIndexTermQuery* query, SArray** result int64_t st = taosGetTimestampUs(); - SIdxTempResult* tr = sIdxTempResultCreate(); + SIdxTRslt* tr = idxTRsltCreate(); if (0 == indexCacheSearch(cache, query, tr, &s)) { if (s == kTypeDeletion) { indexInfo("col: %s already drop by", term->colName); @@ -347,12 +365,12 @@ static int indexTermSearch(SIndex* sIdx, SIndexTermQuery* query, SArray** result int64_t cost = taosGetTimestampUs() - st; indexInfo("search cost: %" PRIu64 "us", cost); - sIdxTempResultMergeTo(*result, tr); + idxTRsltMergeTo(tr, *result); - sIdxTempResultDestroy(tr); + idxTRsltDestroy(tr); return 0; END: - sIdxTempResultDestroy(tr); + idxTRsltDestroy(tr); return -1; } static void indexInterResultsDestroy(SArray* results) { @@ -388,18 +406,18 @@ static int indexMergeFinalResults(SArray* interResults, EIndexOperatorType oType return 0; } -static void indexMayMergeTempToFinalResult(SArray* result, TFileValue* tfv, SIdxTempResult* tr) { +static void indexMayMergeTempToFinalResult(SArray* result, TFileValue* tfv, SIdxTRslt* tr) { int32_t sz = taosArrayGetSize(result); if (sz > 0) { TFileValue* lv = taosArrayGetP(result, sz - 1); if (tfv != NULL && strcmp(lv->colVal, tfv->colVal) != 0) { - sIdxTempResultMergeTo(lv->tableId, tr); - sIdxTempResultClear(tr); + idxTRsltMergeTo(tr, lv->tableId); + idxTRsltClear(tr); taosArrayPush(result, &tfv); } else if (tfv == NULL) { // handle last iterator - sIdxTempResultMergeTo(lv->tableId, tr); + idxTRsltMergeTo(tr, lv->tableId); } else { // temp result saved in help tfileValueDestroy(tfv); @@ -408,7 +426,7 @@ static void indexMayMergeTempToFinalResult(SArray* result, TFileValue* tfv, SIdx taosArrayPush(result, &tfv); } } -static void indexMergeCacheAndTFile(SArray* result, IterateValue* cv, IterateValue* tv, SIdxTempResult* tr) { +static void indexMergeCacheAndTFile(SArray* result, IterateValue* cv, IterateValue* tv, SIdxTRslt* tr) { char* colVal = (cv != NULL) ? 
cv->colVal : tv->colVal; TFileValue* tfv = tfileValueCreate(colVal); @@ -418,9 +436,9 @@ uint64_t id = *(uint64_t*)taosArrayGet(cv->val, 0); uint32_t ver = cv->ver; if (cv->type == ADD_VALUE) { - INDEX_MERGE_ADD_DEL(tr->deled, tr->added, id) + INDEX_MERGE_ADD_DEL(tr->del, tr->add, id) } else if (cv->type == DEL_VALUE) { - INDEX_MERGE_ADD_DEL(tr->added, tr->deled, id) + INDEX_MERGE_ADD_DEL(tr->add, tr->del, id) } } if (tv != NULL) { @@ -451,6 +469,18 @@ int indexFlushCacheToTFile(SIndex* sIdx, void* cache) { } // handle flush Iterate* cacheIter = indexCacheIteratorCreate(pCache); + if (cacheIter == NULL) { + indexError("%p immutable mem table is empty, ignore merge operation", pCache); + indexCacheDestroyImm(pCache); + tfileReaderUnRef(pReader); + if (sIdx->quit) { + indexPost(sIdx); + // indexCacheBroadcast(pCache); + } + indexReleaseRef(sIdx->refId); + return 0; + } + Iterate* tfileIter = tfileIteratorCreate(pReader); if (tfileIter == NULL) { indexWarn("empty tfile reader iterator"); @@ -461,7 +491,7 @@ bool cn = cacheIter ? cacheIter->next(cacheIter) : false; bool tn = tfileIter ? tfileIter->next(tfileIter) : false; - SIdxTempResult* tr = sIdxTempResultCreate(); + SIdxTRslt* tr = idxTRsltCreate(); while (cn == true || tn == true) { IterateValue* cv = (cn == true) ? cacheIter->getValue(cacheIter) : NULL; IterateValue* tv = (tn == true) ? tfileIter->getValue(tfileIter) : NULL; @@ -487,7 +517,7 @@ } } indexMayMergeTempToFinalResult(result, NULL, tr); - sIdxTempResultDestroy(tr); + idxTRsltDestroy(tr); int ret = indexGenTFile(sIdx, pCache, result); indexDestroyFinalResult(result); @@ -506,7 +536,11 @@ } else { indexInfo("success to merge , time cost: %" PRId64 "ms", cost / 1000); } + if (sIdx->quit) { + indexPost(sIdx); + } indexReleaseRef(sIdx->refId); + return ret; } void iterateValueDestroy(IterateValue* value, bool destroy) { @@ -521,8 +555,27 @@ taosMemoryFree(value->colVal); value->colVal = NULL; } +
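+// Version selection for the flushed tfile (worked example): when an on-disk tfile already exists
+// for this column, the new file is numbered max(cache version, on-disk header version) + 1, so file
+// versions stay monotonic; e.g. cache ver 7 and header ver 9 yield a new file written as ver 10.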
+static int64_t indexGetAvailableVer(SIndex* sIdx, IndexCache* cache) { + ICacheKey key = {.suid = cache->suid, .colName = cache->colName, .nColName = strlen(cache->colName)}; + int64_t ver = CACHE_VERSION(cache); + + IndexTFile* tf = (IndexTFile*)(sIdx->tindex); + + taosThreadMutexLock(&tf->mtx); + TFileReader* rd = tfileCacheGet(tf->cache, &key); + taosThreadMutexUnlock(&tf->mtx); + + if (rd != NULL) { + ver = (ver > rd->header.version ? ver : rd->header.version) + 1; + indexInfo("header: %" PRId64 ", ver: %" PRId64 "", rd->header.version, ver); + } + tfileReaderUnRef(rd); + return ver; +} static int indexGenTFile(SIndex* sIdx, IndexCache* cache, SArray* batch) { - int32_t version = CACHE_VERSION(cache); + int64_t version = indexGetAvailableVer(sIdx, cache); + indexInfo("file name version: %" PRId64 "", version); uint8_t colType = cache->type; TFileWriter* tw = tfileWriterOpen(sIdx->path, cache->suid, version, cache->colName, colType); @@ -542,14 +595,16 @@ if (reader == NULL) { return -1; } + indexInfo("successfully created tfile and reopened it: %s", reader->ctx->file.buf); + + IndexTFile* tf = (IndexTFile*)sIdx->tindex; TFileHeader* header = &reader->header; ICacheKey key = {.suid = cache->suid, .colName = header->colName, .nColName = strlen(header->colName)}; - taosThreadMutexLock(&sIdx->mtx); - IndexTFile* ifile = (IndexTFile*)sIdx->tindex; - tfileCachePut(ifile->cache, &key, reader); - taosThreadMutexUnlock(&sIdx->mtx); + taosThreadMutexLock(&tf->mtx); + tfileCachePut(tf->cache, &key, reader); + taosThreadMutexUnlock(&tf->mtx); return ret; END: if (tw != NULL) { @@ -563,10 +618,11 @@ int32_t indexSerialCacheKey(ICacheKey* key, char* buf) { bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(key->colType, TSDB_DATA_TYPE_JSON); char* p = buf; - SERIALIZE_MEM_TO_BUF(buf, key, suid); + char tbuf[65] = {0}; + indexInt2str((int64_t)key->suid, tbuf, 0); + + SERIALIZE_STR_VAR_TO_BUF(buf, tbuf, strlen(tbuf)); SERIALIZE_VAR_TO_BUF(buf, '_', char); - // SERIALIZE_MEM_TO_BUF(buf, key, colType); - // SERIALIZE_VAR_TO_BUF(buf, '_', char); if (hasJson) { SERIALIZE_STR_VAR_TO_BUF(buf, JSON_COLUMN, strlen(JSON_COLUMN)); } else { diff --git a/source/libs/index/src/indexCache.c b/source/libs/index/src/indexCache.c index 9a2e487df1f5880dba5472574199e0bdcfbb58be..3b33006452989fbe8f69155f30041d6345b1d1e0 100644 --- a/source/libs/index/src/indexCache.c +++ b/source/libs/index/src/indexCache.c @@ -23,6 +23,7 @@ #define MEM_TERM_LIMIT 10 * 10000 #define MEM_THRESHOLD 64 * 1024 +#define MEM_SIGNAL_QUIT MEM_THRESHOLD * 20 #define MEM_ESTIMATE_RADIO 1.5 static void indexMemRef(MemTable* tbl); @@ -35,32 +36,31 @@ static char* indexCacheTermGet(const void* pData); static MemTable* indexInternalCacheCreate(int8_t type); -static int32_t cacheSearchTerm(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchPrefix(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchSuffix(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchRegex(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchLessThan(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchRange(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); +static int32_t cacheSearchTerm(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchPrefix(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchSuffix(void* cache, 
SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchRegex(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchLessThan(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchRange(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); /*comm func of compare, used in (LE/LT/GE/GT compare)*/ -static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s, - RangeType type); -static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); - -static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s, +static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s, RangeType type); +static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); + +static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s, RangeType type); -static int32_t (*cacheSearch[][QUERY_MAX])(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s) = { +static int32_t (*cacheSearch[][QUERY_MAX])(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s) = { {cacheSearchTerm, cacheSearchPrefix, cacheSearchSuffix, cacheSearchRegex, cacheSearchLessThan, cacheSearchLessEqual, cacheSearchGreaterThan, cacheSearchGreaterEqual, cacheSearchRange}, 
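/* Row 0 of this dispatch table serves plain columns and row 1 the JSON
   variants; callers index it as cacheSearch[isJson][qtype], as indexQueryMem
   does later in this file:

     int ret = cacheSearch[0][qtype](mem, term, tr, s);   // non-JSON path
*/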
{cacheSearchTerm_JSON, cacheSearchPrefix_JSON, cacheSearchSuffix_JSON, cacheSearchRegex_JSON, @@ -70,7 +70,7 @@ static int32_t (*cacheSearch[][QUERY_MAX])(void* cache, SIndexTerm* ct, SIdxTemp static void doMergeWork(SSchedMsg* msg); static bool indexCacheIteratorNext(Iterate* itera); -static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { if (cache == NULL) { return 0; } @@ -79,7 +79,7 @@ static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTempResult* tr CacheTerm* pCt = taosMemoryCalloc(1, sizeof(CacheTerm)); pCt->colVal = term->colVal; - pCt->version = atomic_load_32(&pCache->version); + pCt->version = atomic_load_64(&pCache->version); char* key = indexCacheTermGet(pCt); @@ -92,11 +92,11 @@ static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTempResult* tr CacheTerm* c = (CacheTerm*)SL_GET_NODE_DATA(node); if (0 == strcmp(c->colVal, pCt->colVal)) { if (c->operaType == ADD_VALUE) { - INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid) + INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid) // taosArrayPush(result, &c->uid); *s = kTypeValue; } else if (c->operaType == DEL_VALUE) { - INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid) + INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid) } } else { break; @@ -107,20 +107,19 @@ static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTempResult* tr tSkipListDestroyIter(iter); return 0; } -static int32_t cacheSearchPrefix(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchPrefix(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { // impl later return 0; } -static int32_t cacheSearchSuffix(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchSuffix(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { // impl later return 0; } -static int32_t cacheSearchRegex(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchRegex(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { // impl later return 0; } -static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s, - RangeType type) { +static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s, RangeType type) { if (cache == NULL) { return 0; } @@ -132,7 +131,8 @@ static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempRes CacheTerm* pCt = taosMemoryCalloc(1, sizeof(CacheTerm)); pCt->colVal = term->colVal; - pCt->version = atomic_load_32(&pCache->version); + pCt->colType = term->colType; + pCt->version = atomic_load_64(&pCache->version); char* key = indexCacheTermGet(pCt); @@ -146,11 +146,11 @@ static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempRes TExeCond cond = cmpFn(c->colVal, pCt->colVal, pCt->colType); if (cond == MATCH) { if (c->operaType == ADD_VALUE) { - INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid) + INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid) // taosArrayPush(result, &c->uid); *s = kTypeValue; } else if (c->operaType == DEL_VALUE) { - INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid) + INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid) } } else if (cond == CONTINUE) { continue; @@ -162,20 +162,20 @@ static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempRes tSkipListDestroyIter(iter); return 
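/* The cache version field is widened from 32 to 64 bits in this patch, so all
   of its atomic accesses move to the matching 64-bit primitives:

     pCt->version = atomic_load_64(&pCache->version);           // readers
     ct->version  = atomic_add_fetch_64(&pCache->version, 1);   // writer
*/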
TSDB_CODE_SUCCESS; } -static int32_t cacheSearchLessThan(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchLessThan(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc(cache, term, tr, s, LT); } -static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc(cache, term, tr, s, LE); } -static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc(cache, term, tr, s, GT); } -static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc(cache, term, tr, s, GE); } -static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { if (cache == NULL) { return 0; } @@ -184,7 +184,7 @@ static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResul CacheTerm* pCt = taosMemoryCalloc(1, sizeof(CacheTerm)); pCt->colVal = term->colVal; - pCt->version = atomic_load_32(&pCache->version); + pCt->version = atomic_load_64(&pCache->version); char* exBuf = NULL; if (INDEX_TYPE_CONTAIN_EXTERN_TYPE(term->colType, TSDB_DATA_TYPE_JSON)) { @@ -203,11 +203,11 @@ static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResul if (0 == strcmp(c->colVal, pCt->colVal)) { if (c->operaType == ADD_VALUE) { - INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid) + INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid) // taosArrayPush(result, &c->uid); *s = kTypeValue; } else if (c->operaType == DEL_VALUE) { - INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid) + INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid) } } else { break; @@ -221,32 +221,32 @@ static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResul return TSDB_CODE_SUCCESS; } -static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return TSDB_CODE_SUCCESS; } -static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return TSDB_CODE_SUCCESS; } -static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return TSDB_CODE_SUCCESS; } -static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc_JSON(cache, term, tr, s, LT); } -static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* 
term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc_JSON(cache, term, tr, s, LE); } -static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc_JSON(cache, term, tr, s, GT); } -static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc_JSON(cache, term, tr, s, GE); } -static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return TSDB_CODE_SUCCESS; } -static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s, +static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s, RangeType type) { if (cache == NULL) { return 0; @@ -258,7 +258,7 @@ static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTe CacheTerm* pCt = taosMemoryCalloc(1, sizeof(CacheTerm)); pCt->colVal = term->colVal; - pCt->version = atomic_load_32(&pCache->version); + pCt->version = atomic_load_64(&pCache->version); int8_t dType = INDEX_TYPE_GET_TYPE(term->colType); int skip = 0; @@ -288,11 +288,11 @@ static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTe TExeCond cond = cmpFn(p + skip, term->colVal, dType); if (cond == MATCH) { if (c->operaType == ADD_VALUE) { - INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid) + INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid) // taosArrayPush(result, &c->uid); *s = kTypeValue; } else if (c->operaType == DEL_VALUE) { - INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid) + INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid) } } else if (cond == CONTINUE) { continue; @@ -308,7 +308,7 @@ static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTe return TSDB_CODE_SUCCESS; } -static int32_t cacheSearchRange(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchRange(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { // impl later return 0; } @@ -334,6 +334,9 @@ IndexCache* indexCacheCreate(SIndex* idx, uint64_t suid, const char* colName, in taosThreadCondInit(&cache->finished, NULL); indexCacheRef(cache); + if (idx != NULL) { + indexAcquireRef(idx->refId); + } return cache; } void indexCacheDebug(IndexCache* cache) { @@ -352,7 +355,7 @@ void indexCacheDebug(IndexCache* cache) { CacheTerm* ct = (CacheTerm*)SL_GET_NODE_DATA(node); if (ct != NULL) { // TODO, add more debug info - indexInfo("{colVal: %s, version: %d} \t", ct->colVal, ct->version); + indexInfo("{colVal: %s, version: %" PRId64 "} \t", ct->colVal, ct->version); } } tSkipListDestroyIter(iter); @@ -373,7 +376,7 @@ void indexCacheDebug(IndexCache* cache) { CacheTerm* ct = (CacheTerm*)SL_GET_NODE_DATA(node); if (ct != NULL) { // TODO, add more debug info - indexInfo("{colVal: %s, version: %d} \t", ct->colVal, ct->version); + indexInfo("{colVal: %s, version: %" PRId64 "} \t", ct->colVal, ct->version); } } tSkipListDestroyIter(iter); @@ -385,7 +388,7 @@ void indexCacheDebug(IndexCache* cache) { void indexCacheDestroySkiplist(SSkipList* slt) { SSkipListIterator* 
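/* The added iter != NULL guard below suggests tSkipListCreateIter can return
   NULL (e.g. for an empty list), so the destroy loop now checks the iterator
   before advancing; the same defensive shape in isolation:

     SSkipListIterator* it = tSkipListCreateIter(slt);
     while (it != NULL && tSkipListIterNext(it)) { /+ free node payload +/ }
     tSkipListDestroyIter(it);

   (the body marker above uses /+ +/ in place of a nested comment) */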
iter = tSkipListCreateIter(slt); - while (tSkipListIterNext(iter)) { + while (iter != NULL && tSkipListIterNext(iter)) { SSkipListNode* node = tSkipListIterGet(iter); CacheTerm* ct = (CacheTerm*)SL_GET_NODE_DATA(node); if (ct != NULL) { @@ -396,17 +399,24 @@ void indexCacheDestroySkiplist(SSkipList* slt) { tSkipListDestroyIter(iter); tSkipListDestroy(slt); } +void indexCacheBroadcast(void* cache) { + IndexCache* pCache = cache; + taosThreadCondBroadcast(&pCache->finished); +} +void indexCacheWait(void* cache) { + IndexCache* pCache = cache; + taosThreadCondWait(&pCache->finished, &pCache->mtx); +} void indexCacheDestroyImm(IndexCache* cache) { if (cache == NULL) { return; } - MemTable* tbl = NULL; taosThreadMutexLock(&cache->mtx); tbl = cache->imm; cache->imm = NULL; // or throw int bg thread - taosThreadCondBroadcast(&cache->finished); + indexCacheBroadcast(cache); taosThreadMutexUnlock(&cache->mtx); @@ -418,22 +428,27 @@ void indexCacheDestroy(void* cache) { if (pCache == NULL) { return; } + indexMemUnRef(pCache->mem); indexMemUnRef(pCache->imm); taosMemoryFree(pCache->colName); taosThreadMutexDestroy(&pCache->mtx); taosThreadCondDestroy(&pCache->finished); - + if (pCache->index != NULL) { + indexReleaseRef(((SIndex*)pCache->index)->refId); + } taosMemoryFree(pCache); } Iterate* indexCacheIteratorCreate(IndexCache* cache) { + if (cache->imm == NULL) { + return NULL; + } Iterate* iiter = taosMemoryCalloc(1, sizeof(Iterate)); if (iiter == NULL) { return NULL; } - taosThreadMutexLock(&cache->mtx); indexMemRef(cache->imm); @@ -458,17 +473,16 @@ void indexCacheIteratorDestroy(Iterate* iter) { taosMemoryFree(iter); } -int indexCacheSchedToMerge(IndexCache* pCache) { +int indexCacheSchedToMerge(IndexCache* pCache, bool notify) { SSchedMsg schedMsg = {0}; schedMsg.fp = doMergeWork; schedMsg.ahandle = pCache; - schedMsg.thandle = NULL; - // schedMsg.thandle = taosMemoryCalloc(1, sizeof(int64_t)); - // memcpy((char*)(schedMsg.thandle), (char*)&(pCache->index->refId), sizeof(int64_t)); + if (notify) { + schedMsg.thandle = taosMemoryMalloc(1); + } schedMsg.msg = NULL; indexAcquireRef(pCache->index->refId); taosScheduleTask(indexQhandle, &schedMsg); - return 0; } @@ -478,8 +492,10 @@ static void indexCacheMakeRoomForWrite(IndexCache* cache) { break; } else if (cache->imm != NULL) { // TODO: wake up by condition variable - taosThreadCondWait(&cache->finished, &cache->mtx); + indexCacheWait(cache); } else { + bool notifyQuit = cache->occupiedMem >= MEM_SIGNAL_QUIT ? 
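/* notifyQuit, computed just below (the ternary is equivalent to the bare
   comparison cache->occupiedMem >= MEM_SIGNAL_QUIT), marks a shutdown merge.
   The flag travels to the merge thread as a sentinel 1-byte allocation in
   schedMsg.thandle and is consumed in doMergeWork further down:

     sidx->quit = msg->thandle ? true : false;
     taosMemoryFree(msg->thandle);   // only the pointer's nullness matters
*/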
true : false; + indexCacheRef(cache); cache->imm = cache->mem; cache->mem = indexInternalCacheCreate(cache->type); @@ -487,7 +503,7 @@ static void indexCacheMakeRoomForWrite(IndexCache* cache) { cache->occupiedMem = 0; // sched to merge // unref cache in bgwork - indexCacheSchedToMerge(cache); + indexCacheSchedToMerge(cache, notifyQuit); } } } @@ -512,7 +528,7 @@ int indexCachePut(void* cache, SIndexTerm* term, uint64_t uid) { ct->colVal = (char*)taosMemoryCalloc(1, sizeof(char) * (term->nColVal + 1)); memcpy(ct->colVal, term->colVal, term->nColVal); } - ct->version = atomic_add_fetch_32(&pCache->version, 1); + ct->version = atomic_add_fetch_64(&pCache->version, 1); // set value ct->uid = uid; ct->operaType = term->operType; @@ -533,12 +549,25 @@ int indexCachePut(void* cache, SIndexTerm* term, uint64_t uid) { return 0; // encode end } +void indexCacheForceToMerge(void* cache) { + IndexCache* pCache = cache; + indexCacheRef(pCache); + taosThreadMutexLock(&pCache->mtx); + + indexInfo("%p is forced to merge into tfile", pCache); + pCache->occupiedMem += MEM_SIGNAL_QUIT; + indexCacheMakeRoomForWrite(pCache); + + taosThreadMutexUnlock(&pCache->mtx); + indexCacheUnRef(pCache); + return; +} int indexCacheDel(void* cache, const char* fieldValue, int32_t fvlen, uint64_t uid, int8_t operType) { IndexCache* pCache = cache; return 0; } -static int32_t indexQueryMem(MemTable* mem, SIndexTermQuery* query, SIdxTempResult* tr, STermValueType* s) { +static int32_t indexQueryMem(MemTable* mem, SIndexTermQuery* query, SIdxTRslt* tr, STermValueType* s) { if (mem == NULL) { return 0; } @@ -552,7 +581,7 @@ static int32_t indexQueryMem(MemTable* mem, SIndexTermQuery* query, SIdxTempResu return cacheSearch[0][qtype](mem, term, tr, s); } } -int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTempResult* result, STermValueType* s) { +int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTRslt* result, STermValueType* s) { int64_t st = taosGetTimestampUs(); if (cache == NULL) { return 0; @@ -567,10 +596,10 @@ int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTempResult* result indexMemRef(imm); taosThreadMutexUnlock(&pCache->mtx); - int ret = indexQueryMem(mem, query, result, s); + int ret = (mem && mem->mem) ? indexQueryMem(mem, query, result, s) : 0; if (ret == 0 && *s != kTypeDeletion) { // continue search in imm - ret = indexQueryMem(imm, query, result, s); + ret = (imm && imm->mem) ? indexQueryMem(imm, query, result, s) : 0; } indexMemUnRef(mem); @@ -633,7 +662,11 @@ static int32_t indexCacheTermCompare(const void* l, const void* r) { // compare colVal int32_t cmp = strcmp(lt->colVal, rt->colVal); if (cmp == 0) { - return rt->version - lt->version; + if (rt->version == lt->version) { + cmp = 0; + } else { + cmp = rt->version < lt->version ? -1 : 1; + } } return cmp; } @@ -675,7 +708,7 @@ static int32_t indexCacheJsonTermCompare(const void* l, const void* r) { return cmp; } static MemTable* indexInternalCacheCreate(int8_t type) { - int ttype = INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? TSDB_DATA_TYPE_BINARY : type; + int ttype = INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? TSDB_DATA_TYPE_BINARY : TSDB_DATA_TYPE_BINARY; int32_t (*cmpFn)(const void* l, const void* r) = INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? 
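/* The comparator change above replaces `return rt->version - lt->version`:
   with 64-bit versions the difference can overflow or truncate when narrowed
   to int32_t and flip the ordering, so an explicit three-way compare is used.
   The generic shape of the fix:

     cmp = (a == b) ? 0 : (a < b ? -1 : 1);   // a, b: int64_t versions
*/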
indexCacheJsonTermCompare : indexCacheTermCompare; @@ -691,6 +724,9 @@ static MemTable* indexInternalCacheCreate(int8_t type) { static void doMergeWork(SSchedMsg* msg) { IndexCache* pCache = msg->ahandle; SIndex* sidx = (SIndex*)pCache->index; + + sidx->quit = msg->thandle ? true : false; + taosMemoryFree(msg->thandle); indexFlushCacheToTFile(sidx, pCache); } static bool indexCacheIteratorNext(Iterate* itera) { @@ -709,9 +745,6 @@ static bool indexCacheIteratorNext(Iterate* itera) { iv->type = ct->operaType; iv->ver = ct->version; iv->colVal = tstrdup(ct->colVal); - // printf("col Val: %s\n", iv->colVal); - // iv->colType = cv->colType; - taosArrayPush(iv->val, &ct->uid); } return next; diff --git a/source/libs/index/src/indexComm.c b/source/libs/index/src/indexComm.c index 4c23e4ba4b9a89d124a93a434da27158891816bc..78c7babb681e44629281f0ffd6ea6ba835495b5b 100644 --- a/source/libs/index/src/indexComm.c +++ b/source/libs/index/src/indexComm.c @@ -22,6 +22,29 @@ #include "ttypes.h" #include "tvariant.h" +#define INDEX_DATA_BOOL_NULL 0x02 +#define INDEX_DATA_TINYINT_NULL 0x80 +#define INDEX_DATA_SMALLINT_NULL 0x8000 +#define INDEX_DATA_INT_NULL 0x80000000L +#define INDEX_DATA_BIGINT_NULL 0x8000000000000000L +#define INDEX_DATA_TIMESTAMP_NULL TSDB_DATA_BIGINT_NULL + +#define INDEX_DATA_FLOAT_NULL 0x7FF00000 // it is an NAN +#define INDEX_DATA_DOUBLE_NULL 0x7FFFFF0000000000L // an NAN +#define INDEX_DATA_NCHAR_NULL 0xFFFFFFFF +#define INDEX_DATA_BINARY_NULL 0xFF +#define INDEX_DATA_JSON_NULL 0xFFFFFFFF +#define INDEX_DATA_JSON_null 0xFFFFFFFE +#define INDEX_DATA_JSON_NOT_NULL 0x01 + +#define INDEX_DATA_UTINYINT_NULL 0xFF +#define INDEX_DATA_USMALLINT_NULL 0xFFFF +#define INDEX_DATA_UINT_NULL 0xFFFFFFFF +#define INDEX_DATA_UBIGINT_NULL 0xFFFFFFFFFFFFFFFFL + +#define INDEX_DATA_NULL_STR "NULL" +#define INDEX_DATA_NULL_STR_L "null" + char JSON_COLUMN[] = "JSON"; char JSON_VALUE_DELIM = '&'; @@ -372,7 +395,7 @@ int32_t indexConvertDataToStr(void* src, int8_t type, void** dst) { tlen = taosEncodeBinary(NULL, varDataVal(src), varDataLen(src)); *dst = taosMemoryCalloc(1, tlen + 1); tlen = taosEncodeBinary(dst, varDataVal(src), varDataLen(src)); - *dst = (char*) * dst - tlen; + *dst = (char*)*dst - tlen; break; } case TSDB_DATA_TYPE_VARCHAR: { // TSDB_DATA_TYPE_BINARY diff --git a/source/libs/executor/src/indexoperator.c b/source/libs/index/src/indexFilter.c similarity index 85% rename from source/libs/executor/src/indexoperator.c rename to source/libs/index/src/indexFilter.c index 2c204e93563367b820eabc27321631935beb2489..b882caa168a3b89dcd037ee34eefa2f8b82bd904 100644 --- a/source/libs/executor/src/indexoperator.c +++ b/source/libs/index/src/indexFilter.c @@ -13,44 +13,18 @@ * along with this program. If not, see . 
*/ -#include "indexoperator.h" -#include "executorimpl.h" #include "index.h" +#include "indexInt.h" #include "nodes.h" +#include "querynodes.h" +#include "scalar.h" #include "tdatablock.h" -typedef struct SIFCtx { - int32_t code; - SHashObj *pRes; /* element is SScalarParam */ - bool noExec; // true: just iterate condition tree, and add hint to executor plan - // SIdxFltStatus st; -} SIFCtx; - -#define SIF_ERR_RET(c) \ - do { \ - int32_t _code = c; \ - if (_code != TSDB_CODE_SUCCESS) { \ - terrno = _code; \ - return _code; \ - } \ - } while (0) -#define SIF_RET(c) \ - do { \ - int32_t _code = c; \ - if (_code != TSDB_CODE_SUCCESS) { \ - terrno = _code; \ - } \ - return _code; \ - } while (0) -#define SIF_ERR_JRET(c) \ - do { \ - code = c; \ - if (code != TSDB_CODE_SUCCESS) { \ - terrno = code; \ - goto _return; \ - } \ - } while (0) - +// clang-format off +#define SIF_ERR_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; return _code; } } while (0) +#define SIF_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; } return _code; } while (0) +#define SIF_ERR_JRET(c) do { code = c; if (code != TSDB_CODE_SUCCESS) { terrno = code; goto _return; } } while (0) +// clang-format on typedef struct SIFParam { SHashObj *pFilter; @@ -63,8 +37,18 @@ typedef struct SIFParam { int64_t suid; // add later char dbName[TSDB_DB_NAME_LEN]; char colName[TSDB_COL_NAME_LEN]; + + SIndexMetaArg arg; } SIFParam; +typedef struct SIFCtx { + int32_t code; + SHashObj * pRes; /* element is SIFParam */ + bool noExec; // true: just iterate condition tree, and add hint to executor plan + SIndexMetaArg arg; + // SIdxFltStatus st; +} SIFCtx; + static int32_t sifGetFuncFromSql(EOperatorType src, EIndexQueryType *dst) { if (src == OP_TYPE_GREATER_THAN) { *dst = QUERY_GREATER_THAN; @@ -89,9 +73,9 @@ typedef int32_t (*sif_func_t)(SIFParam *left, SIFParam *rigth, SIFParam *output) static sif_func_t sifNullFunc = NULL; // typedef struct SIFWalkParm // construct tag filter operator later -static void destroyTagFilterOperatorInfo(void *param) { - STagFilterOperatorInfo *pInfo = (STagFilterOperatorInfo *)param; -} +// static void destroyTagFilterOperatorInfo(void *param) { +// STagFilterOperatorInfo *pInfo = (STagFilterOperatorInfo *)param; +//} static void sifFreeParam(SIFParam *param) { if (param == NULL) return; @@ -198,13 +182,13 @@ static int32_t sifInitParam(SNode *node, SIFParam *param, SIFCtx *ctx) { case QUERY_NODE_NODE_LIST: { SNodeListNode *nl = (SNodeListNode *)node; if (LIST_LENGTH(nl->pNodeList) <= 0) { - qError("invalid length for node:%p, length: %d", node, LIST_LENGTH(nl->pNodeList)); + indexError("invalid length for node:%p, length: %d", node, LIST_LENGTH(nl->pNodeList)); SIF_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); } SIF_ERR_RET(scalarGenerateSetFromList((void **)¶m->pFilter, node, nl->dataType.type)); if (taosHashPut(ctx->pRes, &node, POINTER_BYTES, param, sizeof(*param))) { taosHashCleanup(param->pFilter); - qError("taosHashPut nodeList failed, size:%d", (int32_t)sizeof(*param)); + indexError("taosHashPut nodeList failed, size:%d", (int32_t)sizeof(*param)); SIF_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); } break; @@ -214,7 +198,7 @@ static int32_t sifInitParam(SNode *node, SIFParam *param, SIFCtx *ctx) { case QUERY_NODE_LOGIC_CONDITION: { SIFParam *res = (SIFParam *)taosHashGet(ctx->pRes, &node, POINTER_BYTES); if (NULL == res) { - qError("no result for node, type:%d, node:%p", nodeType(node), node); + indexError("no result for node, type:%d, node:%p", 
nodeType(node), node); SIF_ERR_RET(TSDB_CODE_QRY_APP_ERROR); } *param = *res; @@ -230,7 +214,7 @@ static int32_t sifInitOperParams(SIFParam **params, SOperatorNode *node, SIFCtx int32_t code = 0; int32_t nParam = sifGetOperParamNum(node->opType); if (NULL == node->pLeft || (nParam == 2 && NULL == node->pRight)) { - qError("invalid operation node, left: %p, rigth: %p", node->pLeft, node->pRight); + indexError("invalid operation node, left: %p, right: %p", node->pLeft, node->pRight); SIF_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); } SIFParam *paramList = taosMemoryCalloc(nParam, sizeof(SIFParam)); @@ -252,7 +236,7 @@ static int32_t sifInitParamList(SIFParam **params, SNodeList *nodeList, SIFCtx * int32_t code = 0; SIFParam *tParams = taosMemoryCalloc(nodeList->length, sizeof(SIFParam)); if (tParams == NULL) { - qError("failed to calloc, nodeList: %p", nodeList); + indexError("failed to calloc, nodeList: %p", nodeList); SIF_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); } @@ -272,11 +256,13 @@ _return: SIF_RET(code); } static int32_t sifExecFunction(SFunctionNode *node, SIFCtx *ctx, SIFParam *output) { - qError("index-filter not support buildin function"); + indexError("index-filter does not support builtin functions"); return TSDB_CODE_QRY_INVALID_INPUT; } static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFParam *output) { - SIndexTerm *tm = indexTermCreate(left->suid, DEFAULT, left->colValType, left->colName, strlen(left->colName), +#ifdef USE_INVERTED_INDEX + SIndexMetaArg *arg = &output->arg; + SIndexTerm * tm = indexTermCreate(arg->suid, DEFAULT, left->colValType, left->colName, strlen(left->colName), right->condValue, strlen(right->condValue)); if (tm == NULL) { return TSDB_CODE_QRY_OUT_OF_MEMORY; @@ -287,9 +273,13 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP SIndexMultiTermQuery *mtm = indexMultiTermQueryCreate(MUST); indexMultiTermQueryAdd(mtm, tm, qtype); - int ret = indexSearch(NULL, mtm, output->result); + int ret = indexSearch(arg->metaHandle, mtm, output->result); + indexDebug("index filter data size: %d", (int)taosArrayGetSize(output->result)); indexMultiTermQueryDestroy(mtm); return ret; +#else + return 0; +#endif } static int32_t sifLessThanFunc(SIFParam *left, SIFParam *right, SIFParam *output) { @@ -391,6 +381,8 @@ static int32_t sifExecOper(SOperatorNode *node, SIFCtx *ctx, SIFParam *output) { SIFParam *params = NULL; SIF_ERR_RET(sifInitOperParams(&params, node, ctx)); + // ugly code, refactor later + output->arg = ctx->arg; sif_func_t operFn = sifGetOperFn(node->opType); if (ctx->noExec && operFn == NULL) { output->status = SFLT_NOT_INDEX; @@ -410,8 +402,8 @@ _return: static int32_t sifExecLogic(SLogicConditionNode *node, SIFCtx *ctx, SIFParam *output) { if (NULL == node->pParameterList || node->pParameterList->length <= 0) { - qError("invalid logic parameter list, list:%p, paramNum:%d", node->pParameterList, - node->pParameterList ? node->pParameterList->length : 0); + indexError("invalid logic parameter list, list:%p, paramNum:%d", node->pParameterList, + node->pParameterList ?
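/* With the move from source/libs/executor to source/libs/index, this file now
   logs through the index module's own macros instead of the executor's, e.g.
   (l and r are stand-ins for node->pLeft/node->pRight):

     indexError("invalid operation node, left: %p, right: %p", l, r);  // was qError(...)
*/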
node->pParameterList->length : 0); return TSDB_CODE_QRY_INVALID_INPUT; } @@ -442,7 +434,7 @@ _return: static EDealRes sifWalkFunction(SNode *pNode, void *context) { SFunctionNode *node = (SFunctionNode *)pNode; - SIFParam output = {0}; + SIFParam output = {.result = taosArrayInit(8, sizeof(uint64_t))}; SIFCtx *ctx = context; ctx->code = sifExecFunction(node, ctx, &output); @@ -458,7 +450,8 @@ static EDealRes sifWalkFunction(SNode *pNode, void *context) { } static EDealRes sifWalkLogic(SNode *pNode, void *context) { SLogicConditionNode *node = (SLogicConditionNode *)pNode; - SIFParam output = {0}; + + SIFParam output = {.result = taosArrayInit(8, sizeof(uint64_t))}; SIFCtx *ctx = context; ctx->code = sifExecLogic(node, ctx, &output); @@ -474,7 +467,7 @@ static EDealRes sifWalkLogic(SNode *pNode, void *context) { } static EDealRes sifWalkOper(SNode *pNode, void *context) { SOperatorNode *node = (SOperatorNode *)pNode; - SIFParam output = {0}; + SIFParam output = {.result = taosArrayInit(8, sizeof(uint64_t))}; SIFCtx *ctx = context; ctx->code = sifExecOper(node, ctx, &output); @@ -505,7 +498,7 @@ EDealRes sifCalcWalker(SNode *node, void *context) { return sifWalkOper(node, ctx); } - qError("invalid node type for index filter calculating, type:%d", nodeType(node)); + indexError("invalid node type for index filter calculating, type:%d", nodeType(node)); ctx->code = TSDB_CODE_QRY_INVALID_INPUT; return DEAL_RES_ERROR; } @@ -526,10 +519,11 @@ static int32_t sifCalculate(SNode *pNode, SIFParam *pDst) { return TSDB_CODE_QRY_INVALID_INPUT; } int32_t code = 0; - SIFCtx ctx = {.code = 0, .noExec = false}; + SIFCtx ctx = {.code = 0, .noExec = false, .arg = pDst->arg}; ctx.pRes = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + if (NULL == ctx.pRes) { - qError("index-filter failed to taosHashInit"); + indexError("index-filter failed to taosHashInit"); return TSDB_CODE_QRY_OUT_OF_MEMORY; } @@ -539,10 +533,12 @@ static int32_t sifCalculate(SNode *pNode, SIFParam *pDst) { if (pDst) { SIFParam *res = (SIFParam *)taosHashGet(ctx.pRes, (void *)&pNode, POINTER_BYTES); if (res == NULL) { - qError("no valid res in hash, node:(%p), type(%d)", (void *)&pNode, nodeType(pNode)); + indexError("no valid res in hash, node:(%p), type(%d)", (void *)&pNode, nodeType(pNode)); SIF_ERR_RET(TSDB_CODE_QRY_APP_ERROR); } - taosArrayAddAll(pDst->result, res->result); + if (res->result != NULL) { + taosArrayAddAll(pDst->result, res->result); + } sifFreeParam(res); taosHashRemove(ctx.pRes, (void *)&pNode, POINTER_BYTES); @@ -559,7 +555,7 @@ static int32_t sifGetFltHint(SNode *pNode, SIdxFltStatus *status) { SIFCtx ctx = {.code = 0, .noExec = true}; ctx.pRes = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); if (NULL == ctx.pRes) { - qError("index-filter failed to taosHashInit"); + indexError("index-filter failed to taosHashInit"); return TSDB_CODE_QRY_OUT_OF_MEMORY; } @@ -569,7 +565,7 @@ static int32_t sifGetFltHint(SNode *pNode, SIdxFltStatus *status) { SIFParam *res = (SIFParam *)taosHashGet(ctx.pRes, (void *)&pNode, POINTER_BYTES); if (res == NULL) { - qError("no valid res in hash, node:(%p), type(%d)", (void *)&pNode, nodeType(pNode)); + indexError("no valid res in hash, node:(%p), type(%d)", (void *)&pNode, nodeType(pNode)); SIF_ERR_RET(TSDB_CODE_QRY_APP_ERROR); } *status = res->status; @@ -580,7 +576,7 @@ static int32_t sifGetFltHint(SNode *pNode, SIdxFltStatus *status) { SIF_RET(code); } -int32_t doFilterTag(const SNode *pFilterNode, SArray 
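/* Each sifWalk* callback above seeds its output with a live array instead of
   {0}, so downstream taosArrayAddAll calls always have a valid destination;
   sifCalculate still guards with `if (res->result != NULL)` as a safety net:

     SIFParam output = {.result = taosArrayInit(8, sizeof(uint64_t))};
*/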
*result) { +int32_t doFilterTag(const SNode *pFilterNode, SIndexMetaArg *metaArg, SArray *result) { if (pFilterNode == NULL) { return TSDB_CODE_SUCCESS; } @@ -589,10 +585,12 @@ int32_t doFilterTag(const SNode *pFilterNode, SArray *result) { // todo move to the initialization function // SIF_ERR_RET(filterInitFromNode((SNode *)pFilterNode, &filter, 0)); - SIFParam param = {0}; + SArray * output = taosArrayInit(8, sizeof(uint64_t)); + SIFParam param = {.arg = *metaArg, .result = output}; SIF_ERR_RET(sifCalculate((SNode *)pFilterNode, ¶m)); taosArrayAddAll(result, param.result); + // taosArrayAddAll(result, param.result); sifFreeParam(¶m); SIF_RET(TSDB_CODE_SUCCESS); } diff --git a/source/libs/index/src/indexFst.c b/source/libs/index/src/indexFst.c index 335b0865269604432259847de072a53854286c2c..892716f38708fed46bc755548436f2477d1e91e5 100644 --- a/source/libs/index/src/indexFst.c +++ b/source/libs/index/src/indexFst.c @@ -1324,7 +1324,7 @@ StreamWithStateResult* streamWithStateNextWith(StreamWithState* sws, StreamCallb if (FST_NODE_ADDR(p->node) != fstGetRootAddr(sws->fst)) { taosArrayPop(sws->inp); } - // streamStateDestroy(p); + streamStateDestroy(p); continue; } FstTransition trn; diff --git a/source/libs/index/src/indexFstCountingWriter.c b/source/libs/index/src/indexFstCountingWriter.c index 1d4395aff6bb01eade0c27d9b26241aab1b5e005..8ba51736028515f2d671ecab876ed7437c96b8b7 100644 --- a/source/libs/index/src/indexFstCountingWriter.c +++ b/source/libs/index/src/indexFstCountingWriter.c @@ -97,6 +97,7 @@ WriterCtx* writerCtxCreate(WriterType type, const char* path, bool readOnly, int int64_t file_size; taosStatFile(path, &file_size, NULL); ctx->file.size = (int)file_size; + } else { // ctx->file.pFile = open(path, O_RDONLY, S_IRWXU | S_IRWXG | S_IRWXO); ctx->file.pFile = taosOpenFile(path, TD_FILE_READ); diff --git a/source/libs/index/src/indexFstUtil.c b/source/libs/index/src/indexFstUtil.c index a980c6b740ab4f5b0e128479de342ce84c159c3c..5760b24900ef47e6a52419ade3d91cee9870709a 100644 --- a/source/libs/index/src/indexFstUtil.c +++ b/source/libs/index/src/indexFstUtil.c @@ -93,14 +93,15 @@ FstSlice fstSliceCreate(uint8_t* data, uint64_t len) { // just shallow copy FstSlice fstSliceCopy(FstSlice* s, int32_t start, int32_t end) { FstString* str = s->str; - str->ref++; + atomic_add_fetch_32(&str->ref, 1); FstSlice t = {.str = str, .start = start + s->start, .end = end + s->start}; return t; } FstSlice fstSliceDeepCopy(FstSlice* s, int32_t start, int32_t end) { - int32_t tlen = end - start + 1; - int32_t slen; + int32_t tlen = end - start + 1; + int32_t slen; + uint8_t* data = fstSliceData(s, &slen); assert(tlen <= slen); @@ -129,8 +130,9 @@ uint8_t* fstSliceData(FstSlice* s, int32_t* size) { } void fstSliceDestroy(FstSlice* s) { FstString* str = s->str; - str->ref--; - if (str->ref == 0) { + + int32_t ref = atomic_sub_fetch_32(&str->ref, 1); + if (ref == 0) { taosMemoryFree(str->data); taosMemoryFree(str); s->str = NULL; diff --git a/source/libs/index/src/indexTfile.c b/source/libs/index/src/indexTfile.c index dd6117ed2ac9aa7c0add1c1e5015543187877942..53dd2923ac8c1f07b62098a3663c030016b46a72 100644 --- a/source/libs/index/src/indexTfile.c +++ b/source/libs/index/src/indexTfile.c @@ -1,6 +1,5 @@ /* * Copyright (c) 2019 TAOS Data, Inc. -p * * This program is free software: you can use, redistribute, and/or modify * it under the terms of the GNU Affero General Public License, version 3 * or later ("AGPL"), as published by the Free Software Foundation. 
@@ -55,37 +54,37 @@ static SArray* tfileGetFileList(const char* path); static int tfileRmExpireFile(SArray* result); static void tfileDestroyFileName(void* elem); static int tfileCompare(const void* a, const void* b); -static int tfileParseFileName(const char* filename, uint64_t* suid, char* col, int* version); -static void tfileGenFileName(char* filename, uint64_t suid, const char* col, int version); -static void tfileGenFileFullName(char* fullname, const char* path, uint64_t suid, const char* col, int32_t version); +static int tfileParseFileName(const char* filename, uint64_t* suid, char* col, int64_t* version); +static void tfileGenFileName(char* filename, uint64_t suid, const char* col, int64_t version); +static void tfileGenFileFullName(char* fullname, const char* path, uint64_t suid, const char* col, int64_t version); /* * search from tfile */ -static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTempResult* tr); - -static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType ctype); - -static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); - -static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType ctype); - -static int32_t (*tfSearch[][QUERY_MAX])(void* reader, SIndexTerm* tem, SIdxTempResult* tr) = { +static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTRslt* tr); + +static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType 
ctype); + +static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); + +static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType ctype); + +static int32_t (*tfSearch[][QUERY_MAX])(void* reader, SIndexTerm* tem, SIdxTRslt* tr) = { {tfSearchTerm, tfSearchPrefix, tfSearchSuffix, tfSearchRegex, tfSearchLessThan, tfSearchLessEqual, tfSearchGreaterThan, tfSearchGreaterEqual, tfSearchRange}, {tfSearchTerm_JSON, tfSearchPrefix_JSON, tfSearchSuffix_JSON, tfSearchRegex_JSON, tfSearchLessThan_JSON, @@ -141,7 +140,6 @@ void tfileCacheDestroy(TFileCache* tcache) { TFileReader* p = *reader; indexInfo("drop table cache suid: %" PRIu64 ", colName: %s, colType: %d", p->header.suid, p->header.colName, p->header.colType); - tfileReaderUnRef(p); reader = taosHashIterate(tcache->tableCache, reader); } @@ -154,7 +152,7 @@ TFileReader* tfileCacheGet(TFileCache* tcache, ICacheKey* key) { int32_t sz = indexSerialCacheKey(key, buf); assert(sz < sizeof(buf)); TFileReader** reader = taosHashGet(tcache->tableCache, buf, sz); - if (reader == NULL) { + if (reader == NULL || *reader == NULL) { return NULL; } tfileReaderRef(*reader); @@ -166,13 +164,13 @@ void tfileCachePut(TFileCache* tcache, ICacheKey* key, TFileReader* reader) { int32_t sz = indexSerialCacheKey(key, buf); // remove last version index reader TFileReader** p = taosHashGet(tcache->tableCache, buf, sz); - if (p != NULL) { - TFileReader* oldReader = *p; + if (p != NULL && *p != NULL) { + TFileReader* oldRdr = *p; taosHashRemove(tcache->tableCache, buf, sz); - oldReader->remove = true; - tfileReaderUnRef(oldReader); + indexInfo("found %s, should remove file %s", buf, oldRdr->ctx->file.buf); + oldRdr->remove = true; + tfileReaderUnRef(oldRdr); } - taosHashPut(tcache->tableCache, buf, sz, &reader, sizeof(void*)); tfileReaderRef(reader); return; @@ -182,7 +180,6 @@ TFileReader* tfileReaderCreate(WriterCtx* ctx) { if (reader == NULL) { return NULL; } - reader->ctx = ctx; if (0 != tfileReaderVerify(reader)) { @@ -204,6 +201,7 @@ TFileReader* tfileReaderCreate(WriterCtx* ctx) { tfileReaderDestroy(reader); return NULL; } + reader->remove = false; return reader; } @@ -213,10 +211,16 @@ void tfileReaderDestroy(TFileReader* reader) { } // T_REF_INC(reader); fstDestroy(reader->fst); + if (reader->remove) { + indexInfo("%s is removed", reader->ctx->file.buf); + } else { + indexInfo("%s is not removed", reader->ctx->file.buf); + } writerCtxDestroy(reader->ctx, reader->remove); + taosMemoryFree(reader); } -static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { int ret = 0; char* p = tem->colVal; uint64_t sz = tem->nColVal; @@ -239,7 +243,7 @@ static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { return 0; } -static 
int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON); char* p = tem->colVal; uint64_t sz = tem->nColVal; @@ -275,7 +279,7 @@ static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTempResult* tr) } return 0; } -static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON); int ret = 0; @@ -294,7 +298,7 @@ static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTempResult* tr) fstSliceDestroy(&key); return 0; } -static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON); int ret = 0; @@ -315,7 +319,7 @@ static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTempResult* tr) return 0; } -static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType type) { +static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType type) { int ret = 0; char* p = tem->colVal; int skip = 0; @@ -354,19 +358,19 @@ static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTempResult fstStreamBuilderDestroy(sb); return TSDB_CODE_SUCCESS; } -static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc(reader, tem, tr, LT); } -static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc(reader, tem, tr, LE); } -static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc(reader, tem, tr, GT); } -static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc(reader, tem, tr, GE); } -static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON); int ret = 0; char* p = tem->colVal; @@ -395,7 +399,7 @@ static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTempResult* tr) fstSliceDestroy(&key); return 0; } -static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { int ret = 0; char* p = indexPackJsonData(tem); int sz = strlen(p); @@ -420,36 +424,36 @@ static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* // deprecate api return TSDB_CODE_SUCCESS; } -static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { // impl later return TSDB_CODE_SUCCESS; } -static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, 
SIdxTempResult* tr) { +static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { // impl later return TSDB_CODE_SUCCESS; } -static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { // impl later return TSDB_CODE_SUCCESS; } -static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc_JSON(reader, tem, tr, LT); } -static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc_JSON(reader, tem, tr, LE); } -static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc_JSON(reader, tem, tr, GT); } -static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc_JSON(reader, tem, tr, GE); } -static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { // impl later return TSDB_CODE_SUCCESS; } -static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType ctype) { +static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType ctype) { int ret = 0; int skip = 0; @@ -497,21 +501,21 @@ static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTempR fstStreamBuilderDestroy(sb); return TSDB_CODE_SUCCESS; } -int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTempResult* tr) { +int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTRslt* tr) { SIndexTerm* term = query->term; EIndexQueryType qtype = query->qType; - + int ret = 0; if (INDEX_TYPE_CONTAIN_EXTERN_TYPE(term->colType, TSDB_DATA_TYPE_JSON)) { - return tfSearch[1][qtype](reader, term, tr); + ret = tfSearch[1][qtype](reader, term, tr); } else { - return tfSearch[0][qtype](reader, term, tr); + ret = tfSearch[0][qtype](reader, term, tr); } tfileReaderUnRef(reader); - return 0; + return ret; } -TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int32_t version, const char* colName, uint8_t colType) { +TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int64_t version, const char* colName, uint8_t colType) { char fullname[256] = {0}; tfileGenFileFullName(fullname, path, suid, colName, version); // indexInfo("open write file name %s", fullname); @@ -528,7 +532,7 @@ TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int32_t version, const c return tfileWriterCreate(wcx, &tfh); } -TFileReader* tfileReaderOpen(char* path, uint64_t suid, int32_t version, const char* colName) { +TFileReader* tfileReaderOpen(char* path, uint64_t suid, int64_t version, const char* colName) { char fullname[256] = {0}; tfileGenFileFullName(fullname, path, suid, colName, version); @@ -538,7 +542,7 @@ TFileReader* tfileReaderOpen(char* path, uint64_t suid, int32_t version, const c indexError("failed to open readonly file: %s, reason: %s", fullname, terrstr()); return NULL; } - indexInfo("open read file name:%s, file size: %d", 
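/* tfileReaderSearch (above) used to discard the dispatch result and return 0
   unconditionally; it now forwards it, so a failing tfile lookup is visible to
   callers. Condensed shape, with isJson (0 or 1) as an assumed local:

     ret = tfSearch[isJson][qtype](reader, term, tr);
     tfileReaderUnRef(reader);
     return ret;
*/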
wc->file.buf, wc->file.size); + indexTrace("open read file name:%s, file size: %d", wc->file.buf, wc->file.size); TFileReader* reader = tfileReaderCreate(wc); return reader; @@ -656,7 +660,7 @@ IndexTFile* indexTFileCreate(const char* path) { tfileCacheDestroy(cache); return NULL; } - + taosThreadMutexInit(&tfile->mtx, NULL); tfile->cache = cache; return tfile; } @@ -664,11 +668,12 @@ void indexTFileDestroy(IndexTFile* tfile) { if (tfile == NULL) { return; } + taosThreadMutexDestroy(&tfile->mtx); tfileCacheDestroy(tfile->cache); taosMemoryFree(tfile); } -int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTempResult* result) { +int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTRslt* result) { int ret = -1; if (tfile == NULL) { return ret; @@ -679,7 +684,10 @@ int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTempResult* result SIndexTerm* term = query->term; ICacheKey key = {.suid = term->suid, .colType = term->colType, .colName = term->colName, .nColName = term->nColName}; + + taosThreadMutexLock(&pTfile->mtx); TFileReader* reader = tfileCacheGet(pTfile->cache, &key); + taosThreadMutexUnlock(&pTfile->mtx); if (reader == NULL) { return 0; } @@ -779,8 +787,13 @@ TFileReader* tfileGetReaderByCol(IndexTFile* tf, uint64_t suid, char* colName) { if (tf == NULL) { return NULL; } - ICacheKey key = {.suid = suid, .colType = TSDB_DATA_TYPE_BINARY, .colName = colName, .nColName = strlen(colName)}; - return tfileCacheGet(tf->cache, &key); + TFileReader* rd = NULL; + ICacheKey key = {.suid = suid, .colType = TSDB_DATA_TYPE_BINARY, .colName = colName, .nColName = strlen(colName)}; + + taosThreadMutexLock(&tf->mtx); + rd = tfileCacheGet(tf->cache, &key); + taosThreadMutexUnlock(&tf->mtx); + return rd; } static int tfileUidCompare(const void* a, const void* b) { @@ -1012,7 +1025,7 @@ void tfileReaderUnRef(TFileReader* reader) { static SArray* tfileGetFileList(const char* path) { char buf[128] = {0}; uint64_t suid; - uint32_t version; + int64_t version; SArray* files = taosArrayInit(4, sizeof(void*)); TdDirPtr pDir = taosOpenDir(path); @@ -1052,19 +1065,19 @@ static int tfileCompare(const void* a, const void* b) { return strcmp(as, bs); } -static int tfileParseFileName(const char* filename, uint64_t* suid, char* col, int* version) { - if (3 == sscanf(filename, "%" PRIu64 "-%[^-]-%d.tindex", suid, col, version)) { +static int tfileParseFileName(const char* filename, uint64_t* suid, char* col, int64_t* version) { + if (3 == sscanf(filename, "%" PRIu64 "-%[^-]-%" PRId64 ".tindex", suid, col, version)) { // read suid & colid & version success return 0; } return -1; } // tfile name suid-colId-version.tindex -static void tfileGenFileName(char* filename, uint64_t suid, const char* col, int version) { - sprintf(filename, "%" PRIu64 "-%s-%d.tindex", suid, col, version); +static void tfileGenFileName(char* filename, uint64_t suid, const char* col, int64_t version) { + sprintf(filename, "%" PRIu64 "-%s-%" PRId64 ".tindex", suid, col, version); return; } -static void tfileGenFileFullName(char* fullname, const char* path, uint64_t suid, const char* col, int32_t version) { +static void tfileGenFileFullName(char* fullname, const char* path, uint64_t suid, const char* col, int64_t version) { char filename[128] = {0}; tfileGenFileName(filename, suid, col, version); sprintf(fullname, "%s/%s", path, filename); diff --git a/source/libs/index/src/indexUtil.c b/source/libs/index/src/indexUtil.c index a618787fd49c96b729e782b4a01a5374c76639be..1d2027889572fcd809e378dcae13560b0bae51c1 100644 --- 
a/source/libs/index/src/indexUtil.c +++ b/source/libs/index/src/indexUtil.c @@ -36,24 +36,24 @@ static int iBinarySearch(SArray *arr, int s, int e, uint64_t k) { return s; } -void iIntersection(SArray *inters, SArray *final) { - int32_t sz = (int32_t)taosArrayGetSize(inters); +void iIntersection(SArray *in, SArray *out) { + int32_t sz = (int32_t)taosArrayGetSize(in); if (sz <= 0) { return; } MergeIndex *mi = taosMemoryCalloc(sz, sizeof(MergeIndex)); for (int i = 0; i < sz; i++) { - SArray *t = taosArrayGetP(inters, i); + SArray *t = taosArrayGetP(in, i); mi[i].len = (int32_t)taosArrayGetSize(t); mi[i].idx = 0; } - SArray *base = taosArrayGetP(inters, 0); + SArray *base = taosArrayGetP(in, 0); for (int i = 0; i < taosArrayGetSize(base); i++) { uint64_t tgt = *(uint64_t *)taosArrayGet(base, i); bool has = true; - for (int j = 1; j < taosArrayGetSize(inters); j++) { - SArray *oth = taosArrayGetP(inters, j); + for (int j = 1; j < taosArrayGetSize(in); j++) { + SArray *oth = taosArrayGetP(in, j); int mid = iBinarySearch(oth, mi[j].idx, mi[j].len - 1, tgt); if (mid >= 0 && mid < mi[j].len) { uint64_t val = *(uint64_t *)taosArrayGet(oth, mid); @@ -64,33 +64,33 @@ void iIntersection(SArray *inters, SArray *final) { } } if (has == true) { - taosArrayPush(final, &tgt); + taosArrayPush(out, &tgt); } } taosMemoryFreeClear(mi); } -void iUnion(SArray *inters, SArray *final) { - int32_t sz = (int32_t)taosArrayGetSize(inters); +void iUnion(SArray *in, SArray *out) { + int32_t sz = (int32_t)taosArrayGetSize(in); if (sz <= 0) { return; } if (sz == 1) { - taosArrayAddAll(final, taosArrayGetP(inters, 0)); + taosArrayAddAll(out, taosArrayGetP(in, 0)); return; } MergeIndex *mi = taosMemoryCalloc(sz, sizeof(MergeIndex)); for (int i = 0; i < sz; i++) { - SArray *t = taosArrayGetP(inters, i); + SArray *t = taosArrayGetP(in, i); mi[i].len = (int32_t)taosArrayGetSize(t); mi[i].idx = 0; } while (1) { - uint64_t mVal = UINT_MAX; + uint64_t mVal = UINT64_MAX; int mIdx = -1; for (int j = 0; j < sz; j++) { - SArray *t = taosArrayGetP(inters, j); + SArray *t = taosArrayGetP(in, j); if (mi[j].idx >= mi[j].len) { continue; } @@ -102,13 +102,13 @@ void iUnion(SArray *inters, SArray *final) { } if (mIdx != -1) { mi[mIdx].idx++; - if (taosArrayGetSize(final) > 0) { - uint64_t lVal = *(uint64_t *)taosArrayGetLast(final); + if (taosArrayGetSize(out) > 0) { + uint64_t lVal = *(uint64_t *)taosArrayGetLast(out); if (lVal == mVal) { continue; } } - taosArrayPush(final, &mVal); + taosArrayPush(out, &mVal); } else { break; } @@ -158,41 +158,44 @@ int verdataCompare(const void *a, const void *b) { return cmp; } -SIdxTempResult *sIdxTempResultCreate() { - SIdxTempResult *tr = taosMemoryCalloc(1, sizeof(SIdxTempResult)); +SIdxTRslt *idxTRsltCreate() { + SIdxTRslt *tr = taosMemoryCalloc(1, sizeof(SIdxTRslt)); tr->total = taosArrayInit(4, sizeof(uint64_t)); - tr->added = taosArrayInit(4, sizeof(uint64_t)); - tr->deled = taosArrayInit(4, sizeof(uint64_t)); + tr->add = taosArrayInit(4, sizeof(uint64_t)); + tr->del = taosArrayInit(4, sizeof(uint64_t)); return tr; } -void sIdxTempResultClear(SIdxTempResult *tr) { +void idxTRsltClear(SIdxTRslt *tr) { if (tr == NULL) { return; } taosArrayClear(tr->total); - taosArrayClear(tr->added); - taosArrayClear(tr->deled); + taosArrayClear(tr->add); + taosArrayClear(tr->del); } -void sIdxTempResultDestroy(SIdxTempResult *tr) { +void idxTRsltDestroy(SIdxTRslt *tr) { if (tr == NULL) { return; } taosArrayDestroy(tr->total); - taosArrayDestroy(tr->added); - taosArrayDestroy(tr->deled); + 
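/* Two behavioral fixes in this file: iUnion's running minimum is now seeded
   with UINT64_MAX (the old 32-bit UINT_MAX seed made the merge loop stop early
   and drop any uid above 4294967295), and idxTRsltMergeTo below skips the
   k-way union when total or add is empty:

     if (taosArrayGetSize(tr->total) == 0 || taosArrayGetSize(tr->add) == 0) {
       SArray* t = taosArrayGetSize(tr->total) == 0 ? tr->add : tr->total;
       taosArrayAddAll(result, t);
     }
     iExcept(result, tr->del);   // deletions always win
*/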
taosArrayDestroy(tr->add); + taosArrayDestroy(tr->del); } -void sIdxTempResultMergeTo(SArray *result, SIdxTempResult *tr) { +void idxTRsltMergeTo(SIdxTRslt *tr, SArray *result) { taosArraySort(tr->total, uidCompare); - taosArraySort(tr->added, uidCompare); - taosArraySort(tr->deled, uidCompare); - - SArray *arrs = taosArrayInit(2, sizeof(void *)); - taosArrayPush(arrs, &tr->total); - taosArrayPush(arrs, &tr->added); - - iUnion(arrs, result); - taosArrayDestroy(arrs); - - iExcept(result, tr->deled); + taosArraySort(tr->add, uidCompare); + taosArraySort(tr->del, uidCompare); + + if (taosArrayGetSize(tr->total) == 0 || taosArrayGetSize(tr->add) == 0) { + SArray *t = taosArrayGetSize(tr->total) == 0 ? tr->add : tr->total; + taosArrayAddAll(result, t); + } else { + SArray *arrs = taosArrayInit(2, sizeof(void *)); + taosArrayPush(arrs, &tr->total); + taosArrayPush(arrs, &tr->add); + iUnion(arrs, result); + taosArrayDestroy(arrs); + } + iExcept(result, tr->del); } diff --git a/source/libs/index/test/CMakeLists.txt b/source/libs/index/test/CMakeLists.txt index c0b47e74c6b0561141806dae8ce14ab4d632ec8e..2835084a81b87e358916c20ce0e6c70cf6884021 100644 --- a/source/libs/index/test/CMakeLists.txt +++ b/source/libs/index/test/CMakeLists.txt @@ -1,74 +1,74 @@ -add_executable(indexTest "") -add_executable(fstTest "") -add_executable(fstUT "") -add_executable(UtilUT "") -add_executable(jsonUT "") +add_executable(idxTest "") +add_executable(idxFstTest "") +add_executable(idxFstUT "") +add_executable(idxUtilUT "") +add_executable(idxJsonUT "") -target_sources(indexTest +target_sources(idxTest PRIVATE "indexTests.cc" ) -target_sources(fstTest +target_sources(idxFstTest PRIVATE "fstTest.cc" ) -target_sources(fstUT +target_sources(idxFstUT PRIVATE "fstUT.cc" ) -target_sources(UtilUT +target_sources(idxUtilUT PRIVATE "utilUT.cc" ) -target_sources(jsonUT +target_sources(idxJsonUT PRIVATE "jsonUT.cc" ) -target_include_directories ( indexTest +target_include_directories (idxTest PUBLIC "${TD_SOURCE_DIR}/include/libs/index" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) -target_include_directories ( fstTest +target_include_directories (idxFstTest PUBLIC "${TD_SOURCE_DIR}/include/libs/index" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) -target_include_directories ( fstUT +target_include_directories (idxFstUT PUBLIC "${TD_SOURCE_DIR}/include/libs/index" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) -target_include_directories ( UtilUT +target_include_directories (idxUtilUT PUBLIC "${TD_SOURCE_DIR}/include/libs/index" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) -target_include_directories (jsonUT +target_include_directories (idxJsonUT PUBLIC "${TD_SOURCE_DIR}/include/libs/index" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) -target_link_libraries (indexTest +target_link_libraries (idxTest os util common gtest_main index ) -target_link_libraries (fstTest +target_link_libraries (idxFstTest os util common gtest_main index ) -target_link_libraries (fstUT +target_link_libraries (idxFstUT os util common @@ -76,7 +76,7 @@ target_link_libraries (fstUT index ) -target_link_libraries (UtilUT +target_link_libraries (idxUtilUT os util common @@ -84,7 +84,7 @@ target_link_libraries (UtilUT index ) -target_link_libraries (jsonUT +target_link_libraries (idxJsonUT os util common @@ -92,19 +92,21 @@ target_link_libraries (jsonUT index ) -add_test( - NAME idxtest - COMMAND indexTest -) -add_test( - NAME idxJsonUT - COMMAND jsonUT -) +if(NOT TD_WINDOWS) + add_test( + NAME idxtest + COMMAND idxTest + ) + add_test( + NAME idxJsonUT + COMMAND idxJsonUT + ) +endif(NOT 
TD_WINDOWS) add_test( NAME idxUtilUT - COMMAND UtilUT + COMMAND idxUtilUT ) add_test( NAME idxFstUT - COMMAND fstUT + COMMAND idxFstUT ) diff --git a/source/libs/index/test/fstTest.cc b/source/libs/index/test/fstTest.cc index 679e24f1a7eea48ef815b59c662d9212d755004c..a2d7adf1c7fc6e9ea2c1996206280f953939fd65 100644 --- a/source/libs/index/test/fstTest.cc +++ b/source/libs/index/test/fstTest.cc @@ -15,7 +15,7 @@ #include "tutil.h" void* callback(void* s) { return s; } -static std::string fileName = "/tmp/tindex.tindex"; +static std::string fileName = TD_TMP_DIR_PATH "tindex.tindex"; class FstWriter { public: FstWriter() { @@ -48,7 +48,7 @@ class FstWriter { class FstReadMemory { public: - FstReadMemory(int32_t size, const std::string& fileName = "/tmp/tindex.tindex") { + FstReadMemory(int32_t size, const std::string& fileName = TD_TMP_DIR_PATH "tindex.tindex") { _wc = writerCtxCreate(TFile, fileName.c_str(), true, 64 * 1024); _w = fstCountingWriterCreate(_wc); _size = size; diff --git a/source/libs/index/test/fstUT.cc b/source/libs/index/test/fstUT.cc index ab6c1a47045ea4957dcfd57ccda8c9bbcd8d7a90..136c4dafecdceba665d4a8657ed2053fa2486675 100644 --- a/source/libs/index/test/fstUT.cc +++ b/source/libs/index/test/fstUT.cc @@ -17,7 +17,7 @@ #include "tskiplist.h" #include "tutil.h" -static std::string dir = "/tmp/index"; +static std::string dir = TD_TMP_DIR_PATH "index"; static char indexlog[PATH_MAX] = {0}; static char tindex[PATH_MAX] = {0}; diff --git a/source/libs/index/test/indexTests.cc b/source/libs/index/test/indexTests.cc index 733f1b4ed1f49a7c25a1f7d2c5be8466cd75bd15..74a30c3387ea3c3133e4e4f82ffd3dd8dc38f540 100644 --- a/source/libs/index/test/indexTests.cc +++ b/source/libs/index/test/indexTests.cc @@ -51,7 +51,7 @@ class DebugInfo { class FstWriter { public: FstWriter() { - _wc = writerCtxCreate(TFile, "/tmp/tindex", false, 64 * 1024 * 1024); + _wc = writerCtxCreate(TFile, TD_TMP_DIR_PATH "tindex", false, 64 * 1024 * 1024); _b = fstBuilderCreate(NULL, 0); } bool Put(const std::string& key, uint64_t val) { @@ -75,7 +75,7 @@ class FstWriter { class FstReadMemory { public: FstReadMemory(size_t size) { - _wc = writerCtxCreate(TFile, "/tmp/tindex", true, 64 * 1024); + _wc = writerCtxCreate(TFile, TD_TMP_DIR_PATH "tindex", true, 64 * 1024); _w = fstCountingWriterCreate(_wc); _size = size; memset((void*)&_s, 0, sizeof(_s)); @@ -272,9 +272,26 @@ void validateFst() { } delete m; } +static std::string logDir = TD_TMP_DIR_PATH "log"; + +static void initLog() { + const char* defaultLogFileNamePrefix = "taoslog"; + const int32_t maxLogFileNum = 10; + + tsAsyncLog = 0; + idxDebugFlag = 143; + strcpy(tsLogDir, logDir.c_str()); + taosRemoveDir(tsLogDir); + taosMkDir(tsLogDir); + + if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) { + printf("failed to open log file in directory:%s\n", tsLogDir); + } +} class IndexEnv : public ::testing::Test { protected: virtual void SetUp() { + initLog(); taosRemoveDir(path); opts = indexOptsCreate(); int ret = indexOpen(opts, path, &index); @@ -285,7 +302,7 @@ class IndexEnv : public ::testing::Test { indexOptsDestroy(opts); } - const char* path = "/tmp/tindex"; + const char* path = TD_TMP_DIR_PATH "tindex"; SIndexOpts* opts; SIndex* index; }; @@ -342,7 +359,7 @@ class IndexEnv : public ::testing::Test { class TFileObj { public: - TFileObj(const std::string& path = "/tmp/tindex", const std::string& colName = "voltage") + TFileObj(const std::string& path = TD_TMP_DIR_PATH "tindex", const std::string& colName = "voltage") : path_(path), 
colName_(colName) { colId_ = 10; reader_ = NULL; @@ -370,7 +387,7 @@ class TFileObj { std::string path(path_); int colId = 2; char buf[64] = {0}; - sprintf(buf, "%" PRIu64 "-%d-%d.tindex", header.suid, colId_, header.version); + sprintf(buf, "%" PRIu64 "-%d-%" PRId64 ".tindex", header.suid, colId_, header.version); path.append("/").append(buf); fileName_ = path; @@ -394,12 +411,12 @@ class TFileObj { // // } - SIdxTempResult* tr = sIdxTempResultCreate(); + SIdxTRslt* tr = idxTRsltCreate(); int ret = tfileReaderSearch(reader_, query, tr); - sIdxTempResultMergeTo(result, tr); - sIdxTempResultDestroy(tr); + idxTRsltMergeTo(tr, result); + idxTRsltDestroy(tr); return ret; } ~TFileObj() { @@ -437,7 +454,7 @@ class IndexTFileEnv : public ::testing::Test { // tfileWriterDestroy(twrite); } TFileObj* fObj; - std::string dir = "/tmp/tindex"; + std::string dir = TD_TMP_DIR_PATH "tindex"; std::string colName = "voltage"; int coldId = 2; @@ -514,11 +531,11 @@ class CacheObj { indexCacheDebug(cache); } int Get(SIndexTermQuery* query, int16_t colId, int32_t version, SArray* result, STermValueType* s) { - SIdxTempResult* tr = sIdxTempResultCreate(); + SIdxTRslt* tr = idxTRsltCreate(); int ret = indexCacheSearch(cache, query, tr, s); - sIdxTempResultMergeTo(result, tr); - sIdxTempResultDestroy(tr); + idxTRsltMergeTo(tr, result); + idxTRsltDestroy(tr); if (ret != 0) { std::cout << "failed to get from cache:" << ret << std::endl; @@ -657,10 +674,13 @@ class IndexObj { // opt numOfWrite = 0; numOfRead = 0; - indexInit(); + // indexInit(); } - int Init(const std::string& dir) { - taosRemoveDir(dir.c_str()); + int Init(const std::string& dir, bool remove = true) { + if (remove) { + taosRemoveDir(dir.c_str()); + taosMkDir(dir.c_str()); + } taosMkDir(dir.c_str()); int ret = indexOpen(&opts, dir.c_str(), &idx); if (ret != 0) { @@ -774,10 +794,10 @@ class IndexObj { } int sz = taosArrayGetSize(result); indexMultiTermQueryDestroy(mq); - taosArrayDestroy(result); assert(sz == 1); uint64_t* ret = (uint64_t*)taosArrayGet(result, 0); assert(val = *ret); + taosArrayDestroy(result); return sz; } @@ -804,7 +824,7 @@ class IndexObj { } ~IndexObj() { - indexCleanUp(); + // indexCleanUp(); indexClose(idx); } @@ -817,12 +837,18 @@ class IndexObj { class IndexEnv2 : public ::testing::Test { protected: - virtual void SetUp() { index = new IndexObj(); } - virtual void TearDown() { delete index; } - IndexObj* index; + virtual void SetUp() { + initLog(); + index = new IndexObj(); + } + virtual void TearDown() { + // taosMsleep(500); + delete index; + } + IndexObj* index; }; TEST_F(IndexEnv2, testIndexOpen) { - std::string path = "/tmp/test"; + std::string path = TD_TMP_DIR_PATH "test"; if (index->Init(path) != 0) { std::cout << "failed to init index" << std::endl; exit(1); @@ -884,14 +910,37 @@ TEST_F(IndexEnv2, testIndexOpen) { SArray* result = (SArray*)taosArrayInit(1, sizeof(uint64_t)); index->Search(mq, result); std::cout << "target size: " << taosArrayGetSize(result) << std::endl; - assert(taosArrayGetSize(result) == 400); + EXPECT_EQ(400, taosArrayGetSize(result)); taosArrayDestroy(result); indexMultiTermQueryDestroy(mq); } } +TEST_F(IndexEnv2, testEmptyIndexOpen) { + std::string path = TD_TMP_DIR_PATH "test"; + if (index->Init(path) != 0) { + std::cout << "failed to init index" << std::endl; + exit(1); + } + + int targetSize = 1; + { + std::string colName("tag1"), colVal("Hello"); + + SIndexTerm* term = indexTermCreate(0, ADD_VALUE, TSDB_DATA_TYPE_BINARY, colName.c_str(), colName.size(), + colVal.c_str(), colVal.size()); + 
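/*
 * The SearchOne change above reorders taosArrayDestroy(result) to run after
 * the last taosArrayGet(result, 0): the old order freed the array's storage
 * and then dereferenced an element of it. A self-contained sketch of the
 * same lifetime rule with plain malloc/free (takeFirst is a hypothetical
 * name):
 */
#include <stdint.h>
#include <stdlib.h>

static uint64_t takeFirst(void) {
  uint64_t *arr = (uint64_t *)malloc(sizeof(uint64_t));
  if (arr == NULL) return 0;
  arr[0] = 42;
  uint64_t val = arr[0];  /* copy the element out while storage is alive */
  free(arr);              /* destroy only after the last read */
  return val;
}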
SIndexMultiTerm* terms = indexMultiTermCreate(); + indexMultiTermAdd(terms, term); + for (size_t i = 0; i < targetSize; i++) { + int tableId = i; + int ret = index->Put(terms, tableId); + assert(ret == 0); + } + indexMultiTermDestroy(terms); + } +} TEST_F(IndexEnv2, testIndex_TrigeFlush) { - std::string path = "/tmp/testxxx"; + std::string path = TD_TMP_DIR_PATH "testxxx"; if (index->Init(path) != 0) { // r std::cout << "failed to init" << std::endl; @@ -904,17 +953,19 @@ TEST_F(IndexEnv2, testIndex_TrigeFlush) { } static void single_write_and_search(IndexObj* idx) { - int target = idx->SearchOne("tag1", "Hello"); - target = idx->SearchOne("tag2", "Test"); + // int target = idx->SearchOne("tag1", "Hello"); + // target = idx->SearchOne("tag2", "Test"); } static void multi_write_and_search(IndexObj* idx) { + idx->PutOne("tag1", "Hello"); + idx->PutOne("tag2", "Test"); int target = idx->SearchOne("tag1", "Hello"); target = idx->SearchOne("tag2", "Test"); idx->WriteMultiMillonData("tag1", "hello world test", 100 * 100); idx->WriteMultiMillonData("tag2", "world test nothing", 100 * 10); } TEST_F(IndexEnv2, testIndex_serarch_cache_and_tfile) { - std::string path = "/tmp/cache_and_tfile"; + std::string path = TD_TMP_DIR_PATH "cache_and_tfile"; if (index->Init(path) != 0) { // opt } @@ -934,7 +985,7 @@ TEST_F(IndexEnv2, testIndex_serarch_cache_and_tfile) { } } TEST_F(IndexEnv2, testIndex_MultiWrite_and_MultiRead) { - std::string path = "/tmp/cache_and_tfile"; + std::string path = TD_TMP_DIR_PATH "cache_and_tfile"; if (index->Init(path) != 0) { } @@ -949,16 +1000,16 @@ TEST_F(IndexEnv2, testIndex_MultiWrite_and_MultiRead) { } } -// TEST_F(IndexEnv2, testIndex_restart) { -// std::string path = "/tmp/cache_and_tfile"; -// if (index->Init(path) != 0) { -// } -// index->SearchOneTarget("tag1", "Hello", 10); -// index->SearchOneTarget("tag2", "Test", 10); -//} +TEST_F(IndexEnv2, testIndex_restart) { + std::string path = TD_TMP_DIR_PATH "cache_and_tfile"; + if (index->Init(path, false) != 0) { + } + index->SearchOneTarget("tag1", "Hello", 10); + index->SearchOneTarget("tag2", "Test", 10); +} // TEST_F(IndexEnv2, testIndex_restart1) { -// std::string path = "/tmp/cache_and_tfile"; -// if (index->Init(path) != 0) { +// std::string path = TD_TMP_DIR_PATH "cache_and_tfile"; +// if (index->Init(path, false) != 0) { // } // index->ReadMultiMillonData("tag1", "coding"); // index->SearchOneTarget("tag1", "Hello", 10); @@ -966,7 +1017,7 @@ TEST_F(IndexEnv2, testIndex_MultiWrite_and_MultiRead) { //} // TEST_F(IndexEnv2, testIndex_read_performance) { -// std::string path = "/tmp/cache_and_tfile"; +// std::string path = TD_TMP_DIR_PATH "cache_and_tfile"; // if (index->Init(path) != 0) { // } // index->PutOneTarge("tag1", "Hello", 12); @@ -975,18 +1026,18 @@ TEST_F(IndexEnv2, testIndex_MultiWrite_and_MultiRead) { // std::cout << "reader sz: " << index->SearchOne("tag1", "Hello") << std::endl; // assert(3 == index->SearchOne("tag1", "Hello")); //} -// TEST_F(IndexEnv2, testIndexMultiTag) { -// std::string path = "/tmp/multi_tag"; -// if (index->Init(path) != 0) { -// } -// int64_t st = taosGetTimestampUs(); -// int32_t num = 1000 * 10000; -// index->WriteMultiMillonData("tag1", "xxxxxxxxxxxxxxx", num); -// std::cout << "numOfRow: " << num << "\ttime cost:" << taosGetTimestampUs() - st << std::endl; -// // index->WriteMultiMillonData("tag2", "xxxxxxxxxxxxxxxxxxxxxxxxx", 100 * 10000); -//} +TEST_F(IndexEnv2, testIndexMultiTag) { + std::string path = TD_TMP_DIR_PATH "multi_tag"; + if (index->Init(path) != 0) { + } + 
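/*
 * Init(dir, remove) above makes directory cleanup opt-in so that
 * testIndex_restart can reopen the index files left by an earlier test
 * instead of starting from an empty directory. A standalone mirror of that
 * pattern, assuming POSIX mkdir; initDir is a hypothetical stand-in for the
 * taosRemoveDir/taosMkDir pair used in the tests:
 */
#include <errno.h>
#include <stdbool.h>
#include <sys/stat.h>
#include <sys/types.h>

static int initDir(const char *dir, bool removeFirst) {
  if (removeFirst) {
    /* recursive removal elided; the tests call taosRemoveDir() here */
  }
  if (mkdir(dir, 0755) != 0 && errno != EEXIST) {
    return -1;  /* real failure; a pre-existing directory is fine on restart */
  }
  return 0;
}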
int64_t st = taosGetTimestampUs(); + int32_t num = 100 * 100; + index->WriteMultiMillonData("tag1", "xxxxxxxxxxxxxxx", num); + std::cout << "numOfRow: " << num << "\ttime cost:" << taosGetTimestampUs() - st << std::endl; + // index->WriteMultiMillonData("tag2", "xxxxxxxxxxxxxxxxxxxxxxxxx", 100 * 10000); +} TEST_F(IndexEnv2, testLongComVal1) { - std::string path = "/tmp/long_colVal"; + std::string path = TD_TMP_DIR_PATH "long_colVal"; if (index->Init(path) != 0) { } // gen colVal by randstr @@ -995,7 +1046,7 @@ TEST_F(IndexEnv2, testLongComVal1) { } TEST_F(IndexEnv2, testLongComVal2) { - std::string path = "/tmp/long_colVal"; + std::string path = TD_TMP_DIR_PATH "long_colVal"; if (index->Init(path) != 0) { } // gen colVal by randstr @@ -1003,7 +1054,7 @@ TEST_F(IndexEnv2, testLongComVal2) { index->WriteMultiMillonData("tag1", randstr, 100 * 1000); } TEST_F(IndexEnv2, testLongComVal3) { - std::string path = "/tmp/long_colVal"; + std::string path = TD_TMP_DIR_PATH "long_colVal"; if (index->Init(path) != 0) { } // gen colVal by randstr @@ -1011,7 +1062,7 @@ TEST_F(IndexEnv2, testLongComVal3) { index->WriteMultiMillonData("tag1", randstr, 100 * 1000); } TEST_F(IndexEnv2, testLongComVal4) { - std::string path = "/tmp/long_colVal"; + std::string path = TD_TMP_DIR_PATH "long_colVal"; if (index->Init(path) != 0) { } // gen colVal by randstr @@ -1019,7 +1070,7 @@ TEST_F(IndexEnv2, testLongComVal4) { index->WriteMultiMillonData("tag1", randstr, 100 * 100); } TEST_F(IndexEnv2, testIndex_read_performance1) { - std::string path = "/tmp/cache_and_tfile"; + std::string path = TD_TMP_DIR_PATH "cache_and_tfile"; if (index->Init(path) != 0) { } index->PutOneTarge("tag1", "Hello", 12); @@ -1029,7 +1080,7 @@ TEST_F(IndexEnv2, testIndex_read_performance1) { EXPECT_EQ(2, index->SearchOne("tag1", "Hello")); } TEST_F(IndexEnv2, testIndex_read_performance2) { - std::string path = "/tmp/cache_and_tfile"; + std::string path = TD_TMP_DIR_PATH "cache_and_tfile"; if (index->Init(path) != 0) { } index->PutOneTarge("tag1", "Hello", 12); @@ -1039,7 +1090,7 @@ TEST_F(IndexEnv2, testIndex_read_performance2) { EXPECT_EQ(2, index->SearchOne("tag1", "Hello")); } TEST_F(IndexEnv2, testIndex_read_performance3) { - std::string path = "/tmp/cache_and_tfile"; + std::string path = TD_TMP_DIR_PATH "cache_and_tfile"; if (index->Init(path) != 0) { } index->PutOneTarge("tag1", "Hello", 12); @@ -1049,7 +1100,7 @@ TEST_F(IndexEnv2, testIndex_read_performance3) { EXPECT_EQ(2, index->SearchOne("tag1", "Hello")); } TEST_F(IndexEnv2, testIndex_read_performance4) { - std::string path = "/tmp/cache_and_tfile"; + std::string path = TD_TMP_DIR_PATH "cache_and_tfile"; if (index->Init(path) != 0) { } index->PutOneTarge("tag10", "Hello", 12); @@ -1059,7 +1110,7 @@ TEST_F(IndexEnv2, testIndex_read_performance4) { EXPECT_EQ(1, index->SearchOne("tag10", "Hello")); } TEST_F(IndexEnv2, testIndex_cache_del) { - std::string path = "/tmp/cache_and_tfile"; + std::string path = TD_TMP_DIR_PATH "cache_and_tfile"; if (index->Init(path) != 0) { } for (int i = 0; i < 100; i++) { @@ -1098,7 +1149,7 @@ TEST_F(IndexEnv2, testIndex_cache_del) { } TEST_F(IndexEnv2, testIndex_del) { - std::string path = "/tmp/cache_and_tfile"; + std::string path = TD_TMP_DIR_PATH "cache_and_tfile"; if (index->Init(path) != 0) { } for (int i = 0; i < 100; i++) { diff --git a/source/libs/executor/test/index_executor_tests.cpp b/source/libs/index/test/index_executor_tests.cpp similarity index 98% rename from source/libs/executor/test/index_executor_tests.cpp rename to 
source/libs/index/test/index_executor_tests.cpp index 2449bd1da1824ec94e4c071a0052950b29aeabe1..b88ffe5b8bdb2058a66d1e56020206643c246e42 100644 --- a/source/libs/executor/test/index_executor_tests.cpp +++ b/source/libs/index/test/index_executor_tests.cpp @@ -24,11 +24,7 @@ #pragma GCC diagnostic ignored "-Wunused-variable" #pragma GCC diagnostic ignored "-Wsign-compare" -#include "executor.h" -#include "executorimpl.h" -#include "indexoperator.h" -#include "os.h" - +#include "index.h" #include "stub.h" #include "taos.h" #include "tcompare.h" @@ -57,7 +53,7 @@ void sifInitLogFile() { tsAsyncLog = 0; qDebugFlag = 159; - strcpy(tsLogDir, "/tmp/sif"); + strcpy(tsLogDir, TD_TMP_DIR_PATH "sif"); taosRemoveDir(tsLogDir); taosMkDir(tsLogDir); diff --git a/source/libs/index/test/jsonUT.cc b/source/libs/index/test/jsonUT.cc index e827d1763f2b9e505118f6d0b61a26e82f83aa55..cd5a5d9b0f192883f67e9dfecdbcb3854669fdf3 100644 --- a/source/libs/index/test/jsonUT.cc +++ b/source/libs/index/test/jsonUT.cc @@ -16,15 +16,15 @@ #include "tskiplist.h" #include "tutil.h" -static std::string dir = "/tmp/json"; -static std::string logDir = "/tmp/log"; +static std::string dir = TD_TMP_DIR_PATH "json"; +static std::string logDir = TD_TMP_DIR_PATH "log"; static void initLog() { const char* defaultLogFileNamePrefix = "taoslog"; const int32_t maxLogFileNum = 10; tsAsyncLog = 0; - sDebugFlag = 143; + idxDebugFlag = 143; strcpy(tsLogDir, logDir.c_str()); taosRemoveDir(tsLogDir); taosMkDir(tsLogDir); diff --git a/source/libs/index/test/utilUT.cc b/source/libs/index/test/utilUT.cc index 18a2b457c41c2cd66f20a01f3690d0af4fe69d3d..4a30160244d82b8c00b3e7b031d6fd492057ec21 100644 --- a/source/libs/index/test/utilUT.cc +++ b/source/libs/index/test/utilUT.cc @@ -226,6 +226,22 @@ TEST_F(UtilEnv, 04union) { iUnion(src, rslt); assert(taosArrayGetSize(rslt) == 12); } +TEST_F(UtilEnv, 05unionExcept) { + clearSourceArray(src); + clearFinalArray(rslt); + + uint64_t arr2[] = {7}; + SArray * f = (SArray *)taosArrayGetP(src, 1); + for (int i = 0; i < sizeof(arr2) / sizeof(arr2[0]); i++) { + taosArrayPush(f, &arr2[i]); + } + + iUnion(src, rslt); + + SArray *ept = taosArrayInit(0, sizeof(uint64_t)); + iExcept(rslt, ept); + EXPECT_EQ(taosArrayGetSize(rslt), 1); +} TEST_F(UtilEnv, 01Except) { SArray *total = taosArrayInit(4, sizeof(uint64_t)); { @@ -308,16 +324,36 @@ TEST_F(UtilEnv, 01Except) { ASSERT_EQ(*(uint64_t *)taosArrayGet(total, 1), 100); } TEST_F(UtilEnv, testFill) { - for (int i = 0; i < 10000000; i++) { + for (int i = 0; i < 1000000; i++) { int64_t val = i; char buf[65] = {0}; indexInt2str(val, buf, 1); EXPECT_EQ(val, taosStr2int64(buf)); } - for (int i = 0; i < 10000000; i++) { + for (int i = 0; i < 1000000; i++) { int64_t val = 0 - i; char buf[65] = {0}; indexInt2str(val, buf, -1); EXPECT_EQ(val, taosStr2int64(buf)); } } +TEST_F(UtilEnv, TempResult) { + SIdxTRslt *relt = idxTRsltCreate(); + + SArray *f = taosArrayInit(0, sizeof(uint64_t)); + + uint64_t val = UINT64_MAX - 1; + taosArrayPush(relt->add, &val); + idxTRsltMergeTo(relt, f); + EXPECT_EQ(taosArrayGetSize(f), 1); +} +TEST_F(UtilEnv, TempResultExcept) { + SIdxTRslt *relt = idxTRsltCreate(); + + SArray *f = taosArrayInit(0, sizeof(uint64_t)); + + uint64_t val = UINT64_MAX; + taosArrayPush(relt->add, &val); + idxTRsltMergeTo(relt, f); + EXPECT_EQ(taosArrayGetSize(f), 1); +} diff --git a/source/libs/monitor/src/monMain.c b/source/libs/monitor/src/monMain.c index 3ece089a2821a4e9db0a5e66853c01a224a2e78c..bf857ad718d27f1057529824cfd9cc53106a73bb 100644 --- 
a/source/libs/monitor/src/monMain.c +++ b/source/libs/monitor/src/monMain.c @@ -530,7 +530,8 @@ void monSendReport() { monGenLogJson(pMonitor); char *pCont = tjsonToString(pMonitor->pJson); - if (pCont != NULL) { + // uDebugL("report cont:%s\n", pCont); + if (pCont != NULL) { EHttpCompFlag flag = tsMonitor.cfg.comp ? HTTP_GZIP : HTTP_FLAT; if (taosSendHttpReport(tsMonitor.cfg.server, tsMonitor.cfg.port, pCont, strlen(pCont), flag) != 0) { uError("failed to send monitor msg"); diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index 8019200e7653c72f2993b3b6b2b9303ddef6085b..68d3741b482105d02d4751847f01f3fbdc32986f 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -19,6 +19,21 @@ #include "taos.h" #include "taoserror.h" +#define COPY_SCALAR_FIELD(fldname) \ + do { \ + (pDst)->fldname = (pSrc)->fldname; \ + } while (0) + +#define COPY_CHAR_ARRAY_FIELD(fldname) \ + do { \ + strcpy((pDst)->fldname, (pSrc)->fldname); \ + } while (0) + +#define COPY_OBJECT_FIELD(fldname, size) \ + do { \ + memcpy(&((pDst)->fldname), &((pSrc)->fldname), size); \ + } while (0) + #define COPY_CHAR_POINT_FIELD(fldname) \ do { \ if (NULL == (pSrc)->fldname) { \ @@ -70,27 +85,61 @@ } \ } while (0) -static void dataTypeCopy(const SDataType* pSrc, SDataType* pDst) {} - static SNode* exprNodeCopy(const SExprNode* pSrc, SExprNode* pDst) { - dataTypeCopy(&pSrc->resType, &pDst->resType); - pDst->pAssociation = NULL; + COPY_OBJECT_FIELD(resType, sizeof(SDataType)); + COPY_CHAR_ARRAY_FIELD(aliasName); + COPY_CHAR_ARRAY_FIELD(userAlias); return (SNode*)pDst; } static SNode* columnNodeCopy(const SColumnNode* pSrc, SColumnNode* pDst) { COPY_BASE_OBJECT_FIELD(node, exprNodeCopy); - pDst->pProjectRef = NULL; + COPY_SCALAR_FIELD(tableId); + COPY_SCALAR_FIELD(tableType); + COPY_SCALAR_FIELD(colId); + COPY_SCALAR_FIELD(colType); + COPY_CHAR_ARRAY_FIELD(dbName); + COPY_CHAR_ARRAY_FIELD(tableName); + COPY_CHAR_ARRAY_FIELD(tableAlias); + COPY_CHAR_ARRAY_FIELD(colName); + COPY_SCALAR_FIELD(dataBlockId); + COPY_SCALAR_FIELD(slotId); return (SNode*)pDst; } static SNode* valueNodeCopy(const SValueNode* pSrc, SValueNode* pDst) { COPY_BASE_OBJECT_FIELD(node, exprNodeCopy); COPY_CHAR_POINT_FIELD(literal); + COPY_SCALAR_FIELD(isDuration); + COPY_SCALAR_FIELD(translate); + COPY_SCALAR_FIELD(notReserved); + COPY_SCALAR_FIELD(placeholderNo); + COPY_SCALAR_FIELD(typeData); + COPY_SCALAR_FIELD(unit); if (!pSrc->translate) { return (SNode*)pDst; } switch (pSrc->node.resType.type) { + case TSDB_DATA_TYPE_BOOL: + COPY_SCALAR_FIELD(datum.b); + break; + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_TIMESTAMP: + COPY_SCALAR_FIELD(datum.i); + break; + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_DOUBLE: + COPY_SCALAR_FIELD(datum.d); + break; + case TSDB_DATA_TYPE_UTINYINT: + case TSDB_DATA_TYPE_USMALLINT: + case TSDB_DATA_TYPE_UINT: + case TSDB_DATA_TYPE_UBIGINT: + COPY_SCALAR_FIELD(datum.u); + break; case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_VARCHAR: case TSDB_DATA_TYPE_VARBINARY: @@ -104,7 +153,7 @@ static SNode* valueNodeCopy(const SValueNode* pSrc, SValueNode* pDst) { case TSDB_DATA_TYPE_JSON: case TSDB_DATA_TYPE_DECIMAL: case TSDB_DATA_TYPE_BLOB: - // todo + case TSDB_DATA_TYPE_MEDIUMBLOB: default: break; } @@ -113,6 +162,7 @@ static SNode* valueNodeCopy(const SValueNode* pSrc, SValueNode* pDst) { static SNode* operatorNodeCopy(const SOperatorNode* pSrc, 
SOperatorNode* pDst) { COPY_BASE_OBJECT_FIELD(node, exprNodeCopy); + COPY_SCALAR_FIELD(opType); CLONE_NODE_FIELD(pLeft); CLONE_NODE_FIELD(pRight); return (SNode*)pDst; @@ -120,18 +170,28 @@ static SNode* operatorNodeCopy(const SOperatorNode* pSrc, SOperatorNode* pDst) { static SNode* logicConditionNodeCopy(const SLogicConditionNode* pSrc, SLogicConditionNode* pDst) { COPY_BASE_OBJECT_FIELD(node, exprNodeCopy); + COPY_SCALAR_FIELD(condType); CLONE_NODE_LIST_FIELD(pParameterList); return (SNode*)pDst; } static SNode* functionNodeCopy(const SFunctionNode* pSrc, SFunctionNode* pDst) { COPY_BASE_OBJECT_FIELD(node, exprNodeCopy); + COPY_CHAR_ARRAY_FIELD(functionName); + COPY_SCALAR_FIELD(funcId); + COPY_SCALAR_FIELD(funcType); CLONE_NODE_LIST_FIELD(pParameterList); + COPY_SCALAR_FIELD(udfBufSize); return (SNode*)pDst; } static SNode* tableNodeCopy(const STableNode* pSrc, STableNode* pDst) { COPY_BASE_OBJECT_FIELD(node, exprNodeCopy); + COPY_CHAR_ARRAY_FIELD(dbName); + COPY_CHAR_ARRAY_FIELD(tableName); + COPY_CHAR_ARRAY_FIELD(tableAlias); + COPY_SCALAR_FIELD(precision); + COPY_SCALAR_FIELD(singleTable); return (SNode*)pDst; } @@ -159,6 +219,8 @@ static SNode* realTableNodeCopy(const SRealTableNode* pSrc, SRealTableNode* pDst COPY_BASE_OBJECT_FIELD(table, tableNodeCopy); CLONE_OBJECT_FIELD(pMeta, tableMetaClone); CLONE_OBJECT_FIELD(pVgroupList, vgroupsInfoClone); + COPY_CHAR_ARRAY_FIELD(qualDbName); + COPY_SCALAR_FIELD(ratio); return (SNode*)pDst; } @@ -170,6 +232,7 @@ static SNode* tempTableNodeCopy(const STempTableNode* pSrc, STempTableNode* pDst static SNode* joinTableNodeCopy(const SJoinTableNode* pSrc, SJoinTableNode* pDst) { COPY_BASE_OBJECT_FIELD(table, tableNodeCopy); + COPY_SCALAR_FIELD(joinType); CLONE_NODE_FIELD(pLeft); CLONE_NODE_FIELD(pRight); CLONE_NODE_FIELD(pOnCond); @@ -177,21 +240,30 @@ static SNode* joinTableNodeCopy(const SJoinTableNode* pSrc, SJoinTableNode* pDst } static SNode* targetNodeCopy(const STargetNode* pSrc, STargetNode* pDst) { + COPY_SCALAR_FIELD(dataBlockId); + COPY_SCALAR_FIELD(slotId); CLONE_NODE_FIELD(pExpr); return (SNode*)pDst; } static SNode* groupingSetNodeCopy(const SGroupingSetNode* pSrc, SGroupingSetNode* pDst) { + COPY_SCALAR_FIELD(groupingSetType); CLONE_NODE_LIST_FIELD(pParameterList); return (SNode*)pDst; } static SNode* orderByExprNodeCopy(const SOrderByExprNode* pSrc, SOrderByExprNode* pDst) { CLONE_NODE_FIELD(pExpr); + COPY_SCALAR_FIELD(order); + COPY_SCALAR_FIELD(nullOrder); return (SNode*)pDst; } -static SNode* limitNodeCopy(const SLimitNode* pSrc, SLimitNode* pDst) { return (SNode*)pDst; } +static SNode* limitNodeCopy(const SLimitNode* pSrc, SLimitNode* pDst) { + COPY_SCALAR_FIELD(limit); + COPY_SCALAR_FIELD(offset); + return (SNode*)pDst; +} static SNode* stateWindowNodeCopy(const SStateWindowNode* pSrc, SStateWindowNode* pDst) { CLONE_NODE_FIELD(pCol); @@ -215,13 +287,16 @@ static SNode* intervalWindowNodeCopy(const SIntervalWindowNode* pSrc, SIntervalW } static SNode* nodeListNodeCopy(const SNodeListNode* pSrc, SNodeListNode* pDst) { + COPY_OBJECT_FIELD(dataType, sizeof(SDataType)); CLONE_NODE_LIST_FIELD(pNodeList); return (SNode*)pDst; } static SNode* fillNodeCopy(const SFillNode* pSrc, SFillNode* pDst) { + COPY_SCALAR_FIELD(mode); CLONE_NODE_FIELD(pValues); CLONE_NODE_FIELD(pWStartTs); + COPY_OBJECT_FIELD(timeRange, sizeof(STimeWindow)); return (SNode*)pDst; } @@ -229,7 +304,7 @@ static SNode* logicNodeCopy(const SLogicNode* pSrc, SLogicNode* pDst) { CLONE_NODE_LIST_FIELD(pTargets); CLONE_NODE_FIELD(pConditions); 
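/*
 * The clone functions in this file lean on the three statement-shaped
 * macros introduced above; the do { } while (0) wrapper makes each expand
 * to exactly one statement, so a macro call nests safely under an unbraced
 * if/else. A minimal usage example with a hypothetical struct (pSrc/pDst
 * are expected in scope, as in the *Copy functions):
 */
#include <stdint.h>
#include <string.h>

#define COPY_SCALAR_FIELD(f)     do { (pDst)->f = (pSrc)->f; } while (0)
#define COPY_CHAR_ARRAY_FIELD(f) do { strcpy((pDst)->f, (pSrc)->f); } while (0)

typedef struct { int32_t id; char name[32]; } SDemoNode;

static void demoNodeCopy(const SDemoNode *pSrc, SDemoNode *pDst) {
  COPY_SCALAR_FIELD(id);        /* plain assignment for scalar members */
  COPY_CHAR_ARRAY_FIELD(name);  /* same fixed-size array on both sides */
}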
CLONE_NODE_LIST_FIELD(pChildren); - pDst->pParent = NULL; + COPY_SCALAR_FIELD(optimizedFlag); return (SNode*)pDst; } @@ -239,13 +314,28 @@ static SNode* logicScanCopy(const SScanLogicNode* pSrc, SScanLogicNode* pDst) { CLONE_NODE_LIST_FIELD(pScanPseudoCols); CLONE_OBJECT_FIELD(pMeta, tableMetaClone); CLONE_OBJECT_FIELD(pVgroupList, vgroupsInfoClone); + COPY_SCALAR_FIELD(scanType); + COPY_OBJECT_FIELD(scanSeq[0], sizeof(uint8_t) * 2); + COPY_OBJECT_FIELD(scanRange, sizeof(STimeWindow)); + COPY_OBJECT_FIELD(tableName, sizeof(SName)); + COPY_SCALAR_FIELD(showRewrite); + COPY_SCALAR_FIELD(ratio); CLONE_NODE_LIST_FIELD(pDynamicScanFuncs); + COPY_SCALAR_FIELD(dataRequired); + COPY_SCALAR_FIELD(interval); + COPY_SCALAR_FIELD(offset); + COPY_SCALAR_FIELD(sliding); + COPY_SCALAR_FIELD(intervalUnit); + COPY_SCALAR_FIELD(slidingUnit); + CLONE_NODE_FIELD(pTagCond); return (SNode*)pDst; } static SNode* logicJoinCopy(const SJoinLogicNode* pSrc, SJoinLogicNode* pDst) { COPY_BASE_OBJECT_FIELD(node, logicNodeCopy); + COPY_SCALAR_FIELD(joinType); CLONE_NODE_FIELD(pOnConditions); + COPY_SCALAR_FIELD(isSingleTableJoin); return (SNode*)pDst; } @@ -259,32 +349,50 @@ static SNode* logicAggCopy(const SAggLogicNode* pSrc, SAggLogicNode* pDst) { static SNode* logicProjectCopy(const SProjectLogicNode* pSrc, SProjectLogicNode* pDst) { COPY_BASE_OBJECT_FIELD(node, logicNodeCopy); CLONE_NODE_LIST_FIELD(pProjections); + COPY_CHAR_ARRAY_FIELD(stmtName); + COPY_SCALAR_FIELD(limit); + COPY_SCALAR_FIELD(offset); + COPY_SCALAR_FIELD(slimit); + COPY_SCALAR_FIELD(soffset); return (SNode*)pDst; } static SNode* logicVnodeModifCopy(const SVnodeModifLogicNode* pSrc, SVnodeModifLogicNode* pDst) { COPY_BASE_OBJECT_FIELD(node, logicNodeCopy); - pDst->pDataBlocks = NULL; - pDst->pVgDataBlocks = NULL; + COPY_SCALAR_FIELD(msgType); return (SNode*)pDst; } static SNode* logicExchangeCopy(const SExchangeLogicNode* pSrc, SExchangeLogicNode* pDst) { COPY_BASE_OBJECT_FIELD(node, logicNodeCopy); + COPY_SCALAR_FIELD(srcGroupId); + COPY_SCALAR_FIELD(precision); return (SNode*)pDst; } static SNode* logicWindowCopy(const SWindowLogicNode* pSrc, SWindowLogicNode* pDst) { COPY_BASE_OBJECT_FIELD(node, logicNodeCopy); + COPY_SCALAR_FIELD(winType); CLONE_NODE_LIST_FIELD(pFuncs); + COPY_SCALAR_FIELD(interval); + COPY_SCALAR_FIELD(offset); + COPY_SCALAR_FIELD(sliding); + COPY_SCALAR_FIELD(intervalUnit); + COPY_SCALAR_FIELD(slidingUnit); + COPY_SCALAR_FIELD(sessionGap); CLONE_NODE_FIELD(pTspk); + CLONE_NODE_FIELD(pStateExpr); + COPY_SCALAR_FIELD(triggerType); + COPY_SCALAR_FIELD(watermark); return (SNode*)pDst; } static SNode* logicFillCopy(const SFillLogicNode* pSrc, SFillLogicNode* pDst) { COPY_BASE_OBJECT_FIELD(node, logicNodeCopy); + COPY_SCALAR_FIELD(mode); CLONE_NODE_FIELD(pWStartTs); CLONE_NODE_FIELD(pValues); + COPY_OBJECT_FIELD(timeRange, sizeof(STimeWindow)); return (SNode*)pDst; } @@ -301,28 +409,41 @@ static SNode* logicPartitionCopy(const SPartitionLogicNode* pSrc, SPartitionLogi } static SNode* logicSubplanCopy(const SLogicSubplan* pSrc, SLogicSubplan* pDst) { + COPY_OBJECT_FIELD(id, sizeof(SSubplanId)); CLONE_NODE_FIELD(pNode); - pDst->pChildren = NULL; - pDst->pParents = NULL; - pDst->pVgroupList = NULL; + COPY_SCALAR_FIELD(subplanType); + COPY_SCALAR_FIELD(level); + COPY_SCALAR_FIELD(splitFlag); return (SNode*)pDst; } static SNode* dataBlockDescCopy(const SDataBlockDescNode* pSrc, SDataBlockDescNode* pDst) { + COPY_SCALAR_FIELD(dataBlockId); CLONE_NODE_LIST_FIELD(pSlots); + COPY_SCALAR_FIELD(totalRowSize); + 
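/*
 * COPY_OBJECT_FIELD above memcpy's an embedded struct by its size; that is
 * only sound for plain-data members such as STimeWindow or SDataType, which
 * own no heap pointers. Members that do hold pointers go through
 * CLONE_NODE_FIELD / CLONE_OBJECT_FIELD so the copy is deep. A small
 * illustration with hypothetical types (strdup stands in for the CLONE_*
 * machinery):
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct { int64_t skey; int64_t ekey; } SDemoWindow;  /* plain data */
typedef struct { SDemoWindow timeRange; char *pCond; } SDemoPlan;

static void demoPlanCopy(const SDemoPlan *pSrc, SDemoPlan *pDst) {
  /* byte copy is sound: SDemoWindow owns no heap storage */
  memcpy(&pDst->timeRange, &pSrc->timeRange, sizeof(SDemoWindow));
  /* pointer members need a duplicate, never an alias */
  pDst->pCond = (pSrc->pCond != NULL) ? strdup(pSrc->pCond) : NULL;
}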
COPY_SCALAR_FIELD(outputRowSize); + COPY_SCALAR_FIELD(precision); return (SNode*)pDst; } static SNode* slotDescCopy(const SSlotDescNode* pSrc, SSlotDescNode* pDst) { - dataTypeCopy(&pSrc->dataType, &pDst->dataType); + COPY_SCALAR_FIELD(slotId); + COPY_OBJECT_FIELD(dataType, sizeof(SDataType)); + COPY_SCALAR_FIELD(reserve); + COPY_SCALAR_FIELD(output); + COPY_SCALAR_FIELD(tag); return (SNode*)pDst; } static SNode* downstreamSourceCopy(const SDownstreamSourceNode* pSrc, SDownstreamSourceNode* pDst) { + COPY_OBJECT_FIELD(addr, sizeof(SQueryNodeAddr)); + COPY_SCALAR_FIELD(taskId); + COPY_SCALAR_FIELD(schedId); return (SNode*)pDst; } static SNode* selectStmtCopy(const SSelectStmt* pSrc, SSelectStmt* pDst) { + COPY_SCALAR_FIELD(isDistinct); CLONE_NODE_LIST_FIELD(pProjectionList); CLONE_NODE_FIELD(pFromTable); CLONE_NODE_FIELD(pWhere); @@ -333,6 +454,12 @@ static SNode* selectStmtCopy(const SSelectStmt* pSrc, SSelectStmt* pDst) { CLONE_NODE_LIST_FIELD(pOrderByList); CLONE_NODE_FIELD(pLimit); CLONE_NODE_FIELD(pLimit); + COPY_CHAR_ARRAY_FIELD(stmtName); + COPY_SCALAR_FIELD(precision); + COPY_SCALAR_FIELD(isEmptyResult); + COPY_SCALAR_FIELD(isTimeOrderQuery); + COPY_SCALAR_FIELD(hasAggFuncs); + COPY_SCALAR_FIELD(hasRepeatScanFuncs); return (SNode*)pDst; } @@ -345,7 +472,6 @@ SNodeptr nodesCloneNode(const SNodeptr pNode) { terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } - memcpy(pDst, pNode, nodesNodeSize(nodeType(pNode))); switch (nodeType(pNode)) { case QUERY_NODE_COLUMN: return columnNodeCopy((const SColumnNode*)pNode, (SColumnNode*)pDst); @@ -387,6 +513,8 @@ SNodeptr nodesCloneNode(const SNodeptr pNode) { return slotDescCopy((const SSlotDescNode*)pNode, (SSlotDescNode*)pDst); case QUERY_NODE_DOWNSTREAM_SOURCE: return downstreamSourceCopy((const SDownstreamSourceNode*)pNode, (SDownstreamSourceNode*)pDst); + case QUERY_NODE_LEFT_VALUE: + return pDst; case QUERY_NODE_SELECT_STMT: return selectStmtCopy((const SSelectStmt*)pNode, (SSelectStmt*)pDst); case QUERY_NODE_LOGIC_PLAN_SCAN: diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 0e8f530b0eb8d209f73cf349a4ca8dd590a2e304..78710569cbe6718c6fa899448a1cab11edebaab3 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -78,6 +78,8 @@ const char* nodesNodeName(ENodeType type) { return "TableOptions"; case QUERY_NODE_INDEX_OPTIONS: return "IndexOptions"; + case QUERY_NODE_LEFT_VALUE: + return "LeftValue"; case QUERY_NODE_SET_OPERATOR: return "SetOperator"; case QUERY_NODE_SELECT_STMT: @@ -228,6 +230,8 @@ const char* nodesNodeName(ENodeType type) { return "PhysiFill"; case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW: return "PhysiSessionWindow"; + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW: + return "PhysiStreamSessionWindow"; case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW: return "PhysiStateWindow"; case QUERY_NODE_PHYSICAL_PLAN_PARTITION: @@ -490,6 +494,7 @@ static const char* jkScanLogicPlanScanCols = "ScanCols"; static const char* jkScanLogicPlanScanPseudoCols = "ScanPseudoCols"; static const char* jkScanLogicPlanTableMetaSize = "TableMetaSize"; static const char* jkScanLogicPlanTableMeta = "TableMeta"; +static const char* jkScanLogicPlanTagCond = "TagCond"; static int32_t logicScanNodeToJson(const void* pObj, SJson* pJson) { const SScanLogicNode* pNode = (const SScanLogicNode*)pObj; @@ -507,6 +512,9 @@ static int32_t logicScanNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddObject(pJson, jkScanLogicPlanTableMeta, 
tableMetaToJson, pNode->pMeta); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkScanLogicPlanTagCond, nodeToJson, pNode->pTagCond); + } return code; } @@ -528,6 +536,9 @@ static int32_t jsonToLogicScanNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tjsonMakeObject(pJson, jkScanLogicPlanTableMeta, jsonToTableMeta, (void**)&pNode->pMeta, objSize); } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkScanLogicPlanTagCond, &pNode->pTagCond); + } return code; } @@ -1119,6 +1130,9 @@ static const char* jkTableScanPhysiPlanOffset = "Offset"; static const char* jkTableScanPhysiPlanSliding = "Sliding"; static const char* jkTableScanPhysiPlanIntervalUnit = "intervalUnit"; static const char* jkTableScanPhysiPlanSlidingUnit = "slidingUnit"; +static const char* jkTableScanPhysiPlanTriggerType = "triggerType"; +static const char* jkTableScanPhysiPlanWatermark = "watermark"; +static const char* jkTableScanPhysiPlanTsColId = "tsColId"; static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) { const STableScanPhysiNode* pNode = (const STableScanPhysiNode*)pObj; @@ -1160,6 +1174,15 @@ static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanSlidingUnit, pNode->slidingUnit); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanTriggerType, pNode->triggerType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanWatermark, pNode->watermark); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanTsColId, pNode->tsColId); + } return code; } @@ -1210,6 +1233,15 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) { tjsonGetNumberValue(pJson, jkTableScanPhysiPlanSlidingUnit, pNode->slidingUnit, code); ; } + if (TSDB_CODE_SUCCESS == code) { + tjsonGetNumberValue(pJson, jkTableScanPhysiPlanTriggerType, pNode->triggerType, code); + } + if (TSDB_CODE_SUCCESS == code) { + tjsonGetNumberValue(pJson, jkTableScanPhysiPlanWatermark, pNode->watermark, code); + } + if (TSDB_CODE_SUCCESS == code) { + tjsonGetNumberValue(pJson, jkTableScanPhysiPlanTsColId, pNode->tsColId, code); + } return code; } @@ -1775,6 +1807,7 @@ static const char* jkSubplanDbFName = "DbFName"; static const char* jkSubplanNodeAddr = "NodeAddr"; static const char* jkSubplanRootNode = "RootNode"; static const char* jkSubplanDataSink = "DataSink"; +static const char* jkSubplanTagCond = "TagCond"; static int32_t subplanToJson(const void* pObj, SJson* pJson) { const SSubplan* pNode = (const SSubplan*)pObj; @@ -1801,6 +1834,9 @@ static int32_t subplanToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddObject(pJson, jkSubplanDataSink, nodeToJson, pNode->pDataSink); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkSubplanTagCond, nodeToJson, pNode->pTagCond); + } return code; } @@ -1831,6 +1867,9 @@ static int32_t jsonToSubplan(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeObject(pJson, jkSubplanDataSink, (SNode**)&pNode->pDataSink); } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkSubplanTagCond, (SNode**)&pNode->pTagCond); + } return code; } @@ -2175,7 +2214,7 @@ static int32_t jsonToDatum(const SJson* pJson, void* pObj) { code = TSDB_CODE_OUT_OF_MEMORY; break; } - varDataSetLen(pNode->datum.p, 
pNode->node.resType.bytes); + varDataSetLen(pNode->datum.p, pNode->node.resType.bytes - VARSTR_HEADER_SIZE); if (TSDB_DATA_TYPE_NCHAR == pNode->node.resType.type) { char* buf = taosMemoryCalloc(1, pNode->node.resType.bytes * 2 + VARSTR_HEADER_SIZE + 1); if (NULL == buf) { @@ -2512,6 +2551,29 @@ static int32_t jsonToOrderByExprNode(const SJson* pJson, void* pObj) { return code; } +static const char* jkSessionWindowTsPrimaryKey = "TsPrimaryKey"; +static const char* jkSessionWindowGap = "Gap"; + +static int32_t sessionWindowNodeToJson(const void* pObj, SJson* pJson) { + const SSessionWindowNode* pNode = (const SSessionWindowNode*)pObj; + + int32_t code = tjsonAddObject(pJson, jkSessionWindowTsPrimaryKey, nodeToJson, pNode->pCol); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkSessionWindowGap, nodeToJson, pNode->pGap); + } + return code; +} + +static int32_t jsonToSessionWindowNode(const SJson* pJson, void* pObj) { + SSessionWindowNode* pNode = (SSessionWindowNode*)pObj; + + int32_t code = jsonToNodeObject(pJson, jkSessionWindowTsPrimaryKey, (SNode**)&pNode->pCol); + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkSessionWindowGap, (SNode**)&pNode->pGap); + } + return code; +} + static const char* jkIntervalWindowInterval = "Interval"; static const char* jkIntervalWindowOffset = "Offset"; static const char* jkIntervalWindowSliding = "Sliding"; @@ -2734,6 +2796,150 @@ static int32_t jsonToDownstreamSourceNode(const SJson* pJson, void* pObj) { return code; } +static const char* jkDatabaseOptionsBuffer = "Buffer"; +static const char* jkDatabaseOptionsCachelast = "Cachelast"; +static const char* jkDatabaseOptionsCompressionLevel = "CompressionLevel"; +static const char* jkDatabaseOptionsDaysPerFileNode = "DaysPerFileNode"; +static const char* jkDatabaseOptionsDaysPerFile = "DaysPerFile"; +static const char* jkDatabaseOptionsFsyncPeriod = "FsyncPeriod"; +static const char* jkDatabaseOptionsMaxRowsPerBlock = "MaxRowsPerBlock"; +static const char* jkDatabaseOptionsMinRowsPerBlock = "MinRowsPerBlock"; +static const char* jkDatabaseOptionsKeep = "Keep"; +static const char* jkDatabaseOptionsPages = "Pages"; +static const char* jkDatabaseOptionsPagesize = "Pagesize"; +static const char* jkDatabaseOptionsPrecision = "Precision"; +static const char* jkDatabaseOptionsReplica = "Replica"; +static const char* jkDatabaseOptionsStrict = "Strict"; +static const char* jkDatabaseOptionsWalLevel = "WalLevel"; +static const char* jkDatabaseOptionsNumOfVgroups = "NumOfVgroups"; +static const char* jkDatabaseOptionsSingleStable = "SingleStable"; +static const char* jkDatabaseOptionsRetentions = "Retentions"; +static const char* jkDatabaseOptionsSchemaless = "Schemaless"; + +static int32_t databaseOptionsToJson(const void* pObj, SJson* pJson) { + const SDatabaseOptions* pNode = (const SDatabaseOptions*)pObj; + + int32_t code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsBuffer, pNode->buffer); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsCachelast, pNode->cachelast); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsCompressionLevel, pNode->compressionLevel); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkDatabaseOptionsDaysPerFileNode, nodeToJson, pNode->pDaysPerFile); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsDaysPerFile, pNode->daysPerFile); + } + if (TSDB_CODE_SUCCESS == code) { + code = 
tjsonAddIntegerToObject(pJson, jkDatabaseOptionsFsyncPeriod, pNode->fsyncPeriod); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsMaxRowsPerBlock, pNode->maxRowsPerBlock); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsMinRowsPerBlock, pNode->minRowsPerBlock); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkDatabaseOptionsKeep, pNode->pKeep); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsPages, pNode->pages); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsPagesize, pNode->pagesize); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddStringToObject(pJson, jkDatabaseOptionsPrecision, pNode->precisionStr); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsReplica, pNode->replica); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsStrict, pNode->strict); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsWalLevel, pNode->walLevel); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsNumOfVgroups, pNode->numOfVgroups); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsSingleStable, pNode->singleStable); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkDatabaseOptionsRetentions, pNode->pRetentions); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsSchemaless, pNode->schemaless); + } + + return code; +} + +static int32_t jsonToDatabaseOptions(const SJson* pJson, void* pObj) { + SDatabaseOptions* pNode = (SDatabaseOptions*)pObj; + + int32_t code = tjsonGetIntValue(pJson, jkDatabaseOptionsBuffer, &pNode->buffer); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsCachelast, &pNode->cachelast); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsCompressionLevel, &pNode->compressionLevel); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkDatabaseOptionsDaysPerFileNode, (SNode**)&pNode->pDaysPerFile); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkDatabaseOptionsDaysPerFile, &pNode->daysPerFile); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkDatabaseOptionsFsyncPeriod, &pNode->fsyncPeriod); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkDatabaseOptionsMaxRowsPerBlock, &pNode->maxRowsPerBlock); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkDatabaseOptionsMinRowsPerBlock, &pNode->minRowsPerBlock); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkDatabaseOptionsKeep, &pNode->pKeep); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkDatabaseOptionsPages, &pNode->pages); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkDatabaseOptionsPagesize, &pNode->pagesize); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetStringValue(pJson, jkDatabaseOptionsPrecision, pNode->precisionStr); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsReplica, &pNode->replica); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsStrict, 
&pNode->strict); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsWalLevel, &pNode->walLevel); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkDatabaseOptionsNumOfVgroups, &pNode->numOfVgroups); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsSingleStable, &pNode->singleStable); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkDatabaseOptionsRetentions, &pNode->pRetentions); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsSchemaless, &pNode->schemaless); + } + + return code; +} + static const char* jkDataBlockDescDataBlockId = "DataBlockId"; static const char* jkDataBlockDescSlots = "Slots"; static const char* jkDataBlockTotalRowSize = "TotalRowSize"; @@ -2936,6 +3142,130 @@ static int32_t jsonToSelectStmt(const SJson* pJson, void* pObj) { return code; } +static const char* jkAlterDatabaseStmtDbName = "DbName"; +static const char* jkAlterDatabaseStmtOptions = "Options"; + +static int32_t alterDatabaseStmtToJson(const void* pObj, SJson* pJson) { + const SAlterDatabaseStmt* pNode = (const SAlterDatabaseStmt*)pObj; + + int32_t code = tjsonAddStringToObject(pJson, jkAlterDatabaseStmtDbName, pNode->dbName); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkAlterDatabaseStmtOptions, nodeToJson, pNode->pOptions); + } + + return code; +} + +static int32_t jsonToAlterDatabaseStmt(const SJson* pJson, void* pObj) { + SAlterDatabaseStmt* pNode = (SAlterDatabaseStmt*)pObj; + + int32_t code = tjsonGetStringValue(pJson, jkAlterDatabaseStmtDbName, pNode->dbName); + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkAlterDatabaseStmtOptions, (SNode**)&pNode->pOptions); + } + + return code; +} + +static const char* jkAlterTableStmtDbName = "DbName"; +static const char* jkAlterTableStmtTableName = "TableName"; +static const char* jkAlterTableStmtAlterType = "AlterType"; +static const char* jkAlterTableStmtColName = "ColName"; +static const char* jkAlterTableStmtNewColName = "NewColName"; +static const char* jkAlterTableStmtOptions = "Options"; +static const char* jkAlterTableStmtNewDataType = "NewDataType"; +static const char* jkAlterTableStmtNewTagVal = "NewTagVal"; + +static int32_t alterTableStmtToJson(const void* pObj, SJson* pJson) { + const SAlterTableStmt* pNode = (const SAlterTableStmt*)pObj; + + int32_t code = tjsonAddStringToObject(pJson, jkAlterTableStmtDbName, pNode->dbName); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddStringToObject(pJson, jkAlterTableStmtTableName, pNode->tableName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkAlterTableStmtAlterType, pNode->alterType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddStringToObject(pJson, jkAlterTableStmtColName, pNode->colName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddStringToObject(pJson, jkAlterTableStmtNewColName, pNode->newColName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkAlterTableStmtOptions, nodeToJson, pNode->pOptions); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkAlterTableStmtNewDataType, dataTypeToJson, &pNode->dataType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkAlterTableStmtOptions, nodeToJson, pNode->pVal); + } + + return code; +} + +static int32_t jsonToAlterTableStmt(const SJson* pJson, void* pObj) { + SAlterTableStmt* pNode = (SAlterTableStmt*)pObj; + + 
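/*
 * Note on the writer above and the reader below: alterTableStmtToJson
 * serializes pNode->pVal under jkAlterTableStmtOptions, the same key
 * already used for pOptions, and jsonToAlterTableStmt reads pVal back
 * under that key as well; the declared jkAlterTableStmtNewTagVal key is
 * not referenced by either side.
 */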
int32_t code = tjsonGetStringValue(pJson, jkAlterTableStmtDbName, pNode->dbName); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetStringValue(pJson, jkAlterTableStmtTableName, pNode->tableName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkAlterTableStmtAlterType, &pNode->alterType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetStringValue(pJson, jkAlterTableStmtColName, pNode->colName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetStringValue(pJson, jkAlterTableStmtNewColName, pNode->newColName); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkAlterTableStmtOptions, (SNode**)&pNode->pOptions); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonToObject(pJson, jkAlterTableStmtNewDataType, jsonToDataType, &pNode->dataType); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkAlterTableStmtOptions, (SNode**)&pNode->pVal); + } + + return code; +} + +static const char* jkAlterDnodeStmtDnodeId = "DnodeId"; +static const char* jkAlterDnodeStmtConfig = "Config"; +static const char* jkAlterDnodeStmtValue = "Value"; + +static int32_t alterDnodeStmtToJson(const void* pObj, SJson* pJson) { + const SAlterDnodeStmt* pNode = (const SAlterDnodeStmt*)pObj; + + int32_t code = tjsonAddIntegerToObject(pJson, jkAlterDnodeStmtDnodeId, pNode->dnodeId); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddStringToObject(pJson, jkAlterDnodeStmtConfig, pNode->config); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddStringToObject(pJson, jkAlterDnodeStmtValue, pNode->value); + } + + return code; +} + +static int32_t jsonToAlterDnodeStmt(const SJson* pJson, void* pObj) { + SAlterDnodeStmt* pNode = (SAlterDnodeStmt*)pObj; + + int32_t code = tjsonGetIntValue(pJson, jkAlterDnodeStmtDnodeId, &pNode->dnodeId); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetStringValue(pJson, jkAlterDnodeStmtConfig, pNode->config); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetStringValue(pJson, jkAlterDnodeStmtValue, pNode->value); + } + + return code; +} + static const char* jkCreateTopicStmtTopicName = "TopicName"; static const char* jkCreateTopicStmtSubscribeDbName = "SubscribeDbName"; static const char* jkCreateTopicStmtIgnoreExists = "IgnoreExists"; @@ -2999,8 +3329,9 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { return orderByExprNodeToJson(pObj, pJson); case QUERY_NODE_LIMIT: case QUERY_NODE_STATE_WINDOW: - case QUERY_NODE_SESSION_WINDOW: break; + case QUERY_NODE_SESSION_WINDOW: + return sessionWindowNodeToJson(pObj, pJson); case QUERY_NODE_INTERVAL_WINDOW: return intervalWindowNodeToJson(pObj, pJson); case QUERY_NODE_NODE_LIST: @@ -3019,14 +3350,27 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { break; case QUERY_NODE_DOWNSTREAM_SOURCE: return downstreamSourceNodeToJson(pObj, pJson); + case QUERY_NODE_DATABASE_OPTIONS: + return databaseOptionsToJson(pObj, pJson); + case QUERY_NODE_LEFT_VALUE: + return TSDB_CODE_SUCCESS; // SLeftValueNode has no fields to serialize. 
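/*
 * The serializers in this file all thread a single int32_t code through
 * their steps: each step is guarded by TSDB_CODE_SUCCESS == code, so the
 * first failure short-circuits everything after it and the function
 * returns the earliest error. A minimal mirror of the idiom (DEMO_SUCCESS,
 * step1, and step2 are hypothetical):
 */
#include <stdint.h>

#define DEMO_SUCCESS 0  /* stands in for TSDB_CODE_SUCCESS */

static int32_t step1(void) { return DEMO_SUCCESS; }
static int32_t step2(void) { return DEMO_SUCCESS; }

static int32_t runAll(void) {
  int32_t code = step1();
  if (DEMO_SUCCESS == code) {
    code = step2();  /* runs only if every prior step succeeded */
  }
  return code;       /* the earliest failure wins */
}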
case QUERY_NODE_SET_OPERATOR: return setOperatorToJson(pObj, pJson); case QUERY_NODE_SELECT_STMT: return selectStmtToJson(pObj, pJson); case QUERY_NODE_VNODE_MODIF_STMT: case QUERY_NODE_CREATE_DATABASE_STMT: + break; + case QUERY_NODE_ALTER_DATABASE_STMT: + return alterDatabaseStmtToJson(pObj, pJson); case QUERY_NODE_CREATE_TABLE_STMT: + break; + case QUERY_NODE_ALTER_TABLE_STMT: + return alterTableStmtToJson(pObj, pJson); case QUERY_NODE_USE_DATABASE_STMT: + break; + case QUERY_NODE_ALTER_DNODE_STMT: + return alterDnodeStmtToJson(pObj, pJson); case QUERY_NODE_SHOW_DATABASES_STMT: case QUERY_NODE_SHOW_TABLES_STMT: break; @@ -3078,6 +3422,7 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { case QUERY_NODE_PHYSICAL_PLAN_FILL: return physiFillNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW: return physiSessionWindowNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW: return physiStateWindowNodeToJson(pObj, pJson); @@ -3116,6 +3461,8 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) { return jsonToTempTableNode(pJson, pObj); case QUERY_NODE_ORDER_BY_EXPR: return jsonToOrderByExprNode(pJson, pObj); + case QUERY_NODE_SESSION_WINDOW: + return jsonToSessionWindowNode(pJson, pObj); case QUERY_NODE_INTERVAL_WINDOW: return jsonToIntervalWindowNode(pJson, pObj); case QUERY_NODE_NODE_LIST: @@ -3130,10 +3477,20 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) { return jsonToSlotDescNode(pJson, pObj); case QUERY_NODE_DOWNSTREAM_SOURCE: return jsonToDownstreamSourceNode(pJson, pObj); + case QUERY_NODE_DATABASE_OPTIONS: + return jsonToDatabaseOptions(pJson, pObj); + case QUERY_NODE_LEFT_VALUE: + return TSDB_CODE_SUCCESS; // SLeftValueNode has no fields to deserialize. 
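/*
 * On the jsonToDatum fix earlier in this patch, where varDataSetLen now
 * receives resType.bytes - VARSTR_HEADER_SIZE: the length prefix of a
 * var-type datum counts payload bytes only, while resType.bytes includes
 * the header. A hedged standalone mirror, assuming a 2-byte length prefix
 * (the demo* names are stand-ins, not the project's macros):
 */
#include <stdint.h>
#include <string.h>

#define DEMO_VARSTR_HEADER_SIZE sizeof(uint16_t)        /* assumed prefix width */
#define demoVarDataSetLen(p, l) (*(uint16_t *)(p) = (uint16_t)(l))

static void fillVarData(char *p, const char *payload, int32_t totalBytes) {
  int32_t payloadLen = totalBytes - (int32_t)DEMO_VARSTR_HEADER_SIZE;
  demoVarDataSetLen(p, payloadLen);                 /* payload bytes only */
  memcpy(p + DEMO_VARSTR_HEADER_SIZE, payload, payloadLen);
}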
case QUERY_NODE_SET_OPERATOR: return jsonToSetOperator(pJson, pObj); case QUERY_NODE_SELECT_STMT: return jsonToSelectStmt(pJson, pObj); + case QUERY_NODE_ALTER_DATABASE_STMT: + return jsonToAlterDatabaseStmt(pJson, pObj); + case QUERY_NODE_ALTER_TABLE_STMT: + return jsonToAlterTableStmt(pJson, pObj); + case QUERY_NODE_ALTER_DNODE_STMT: + return jsonToAlterDnodeStmt(pJson, pObj); case QUERY_NODE_CREATE_TOPIC_STMT: return jsonToCreateTopicStmt(pJson, pObj); case QUERY_NODE_LOGIC_PLAN_SCAN: @@ -3176,6 +3533,7 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) { case QUERY_NODE_PHYSICAL_PLAN_FILL: return jsonToPhysiFillNode(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW: return jsonToPhysiSessionWindowNode(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW: return jsonToPhysiStateWindowNode(pJson, pObj); diff --git a/source/libs/nodes/src/nodesTraverseFuncs.c b/source/libs/nodes/src/nodesTraverseFuncs.c index e8274c3c8eaa916a6e2c3877cde6185b99a623d8..ae1ff5744bcc48eeaec661137e01eeaf01684636 100644 --- a/source/libs/nodes/src/nodesTraverseFuncs.c +++ b/source/libs/nodes/src/nodesTraverseFuncs.c @@ -517,6 +517,7 @@ static EDealRes dispatchPhysiPlan(SNode* pNode, ETraversalOrder order, FNodeWalk res = walkWindowPhysi((SWinodwPhysiNode*)pNode, order, walker, pContext); break; case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW: res = walkWindowPhysi((SWinodwPhysiNode*)pNode, order, walker, pContext); break; case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW: { diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index 9fb9d8e5514da34620acfb0e385d5e550c041660..e28844f2e16f07c57232b073f0052411d60a2d0f 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -21,147 +21,160 @@ #include "taoserror.h" #include "thash.h" -int32_t nodesNodeSize(ENodeType type) { +static SNode* makeNode(ENodeType type, size_t size) { + SNode* p = taosMemoryCalloc(1, size); + if (NULL == p) { + return NULL; + } + setNodeType(p, type); + return p; +} + +SNodeptr nodesMakeNode(ENodeType type) { switch (type) { case QUERY_NODE_COLUMN: - return sizeof(SColumnNode); + return makeNode(type, sizeof(SColumnNode)); case QUERY_NODE_VALUE: - return sizeof(SValueNode); + return makeNode(type, sizeof(SValueNode)); case QUERY_NODE_OPERATOR: - return sizeof(SOperatorNode); + return makeNode(type, sizeof(SOperatorNode)); case QUERY_NODE_LOGIC_CONDITION: - return sizeof(SLogicConditionNode); + return makeNode(type, sizeof(SLogicConditionNode)); case QUERY_NODE_FUNCTION: - return sizeof(SFunctionNode); + return makeNode(type, sizeof(SFunctionNode)); case QUERY_NODE_REAL_TABLE: - return sizeof(SRealTableNode); + return makeNode(type, sizeof(SRealTableNode)); case QUERY_NODE_TEMP_TABLE: - return sizeof(STempTableNode); + return makeNode(type, sizeof(STempTableNode)); case QUERY_NODE_JOIN_TABLE: - return sizeof(SJoinTableNode); + return makeNode(type, sizeof(SJoinTableNode)); case QUERY_NODE_GROUPING_SET: - return sizeof(SGroupingSetNode); + return makeNode(type, sizeof(SGroupingSetNode)); case QUERY_NODE_ORDER_BY_EXPR: - return sizeof(SOrderByExprNode); + return makeNode(type, sizeof(SOrderByExprNode)); case QUERY_NODE_LIMIT: - return sizeof(SLimitNode); + return makeNode(type, sizeof(SLimitNode)); case QUERY_NODE_STATE_WINDOW: - return sizeof(SStateWindowNode); + return makeNode(type, sizeof(SStateWindowNode)); case 
QUERY_NODE_SESSION_WINDOW: - return sizeof(SSessionWindowNode); + return makeNode(type, sizeof(SSessionWindowNode)); case QUERY_NODE_INTERVAL_WINDOW: - return sizeof(SIntervalWindowNode); + return makeNode(type, sizeof(SIntervalWindowNode)); case QUERY_NODE_NODE_LIST: - return sizeof(SNodeListNode); + return makeNode(type, sizeof(SNodeListNode)); case QUERY_NODE_FILL: - return sizeof(SFillNode); + return makeNode(type, sizeof(SFillNode)); case QUERY_NODE_RAW_EXPR: - return sizeof(SRawExprNode); + return makeNode(type, sizeof(SRawExprNode)); case QUERY_NODE_TARGET: - return sizeof(STargetNode); + return makeNode(type, sizeof(STargetNode)); case QUERY_NODE_DATABLOCK_DESC: - return sizeof(SDataBlockDescNode); + return makeNode(type, sizeof(SDataBlockDescNode)); case QUERY_NODE_SLOT_DESC: - return sizeof(SSlotDescNode); + return makeNode(type, sizeof(SSlotDescNode)); case QUERY_NODE_COLUMN_DEF: - return sizeof(SColumnDefNode); + return makeNode(type, sizeof(SColumnDefNode)); case QUERY_NODE_DOWNSTREAM_SOURCE: - return sizeof(SDownstreamSourceNode); + return makeNode(type, sizeof(SDownstreamSourceNode)); case QUERY_NODE_DATABASE_OPTIONS: - return sizeof(SDatabaseOptions); + return makeNode(type, sizeof(SDatabaseOptions)); case QUERY_NODE_TABLE_OPTIONS: - return sizeof(STableOptions); + return makeNode(type, sizeof(STableOptions)); case QUERY_NODE_INDEX_OPTIONS: - return sizeof(SIndexOptions); + return makeNode(type, sizeof(SIndexOptions)); case QUERY_NODE_EXPLAIN_OPTIONS: - return sizeof(SExplainOptions); + return makeNode(type, sizeof(SExplainOptions)); case QUERY_NODE_STREAM_OPTIONS: - return sizeof(SStreamOptions); + return makeNode(type, sizeof(SStreamOptions)); case QUERY_NODE_TOPIC_OPTIONS: - return sizeof(STopicOptions); + return makeNode(type, sizeof(STopicOptions)); + case QUERY_NODE_LEFT_VALUE: + return makeNode(type, sizeof(SLeftValueNode)); case QUERY_NODE_SET_OPERATOR: - return sizeof(SSetOperator); + return makeNode(type, sizeof(SSetOperator)); case QUERY_NODE_SELECT_STMT: - return sizeof(SSelectStmt); + return makeNode(type, sizeof(SSelectStmt)); case QUERY_NODE_VNODE_MODIF_STMT: - return sizeof(SVnodeModifOpStmt); + return makeNode(type, sizeof(SVnodeModifOpStmt)); case QUERY_NODE_CREATE_DATABASE_STMT: - return sizeof(SCreateDatabaseStmt); + return makeNode(type, sizeof(SCreateDatabaseStmt)); case QUERY_NODE_DROP_DATABASE_STMT: - return sizeof(SDropDatabaseStmt); + return makeNode(type, sizeof(SDropDatabaseStmt)); case QUERY_NODE_ALTER_DATABASE_STMT: - return sizeof(SAlterDatabaseStmt); + return makeNode(type, sizeof(SAlterDatabaseStmt)); case QUERY_NODE_CREATE_TABLE_STMT: - return sizeof(SCreateTableStmt); + return makeNode(type, sizeof(SCreateTableStmt)); case QUERY_NODE_CREATE_SUBTABLE_CLAUSE: - return sizeof(SCreateSubTableClause); + return makeNode(type, sizeof(SCreateSubTableClause)); case QUERY_NODE_CREATE_MULTI_TABLE_STMT: - return sizeof(SCreateMultiTableStmt); + return makeNode(type, sizeof(SCreateMultiTableStmt)); case QUERY_NODE_DROP_TABLE_CLAUSE: - return sizeof(SDropTableClause); + return makeNode(type, sizeof(SDropTableClause)); case QUERY_NODE_DROP_TABLE_STMT: - return sizeof(SDropTableStmt); + return makeNode(type, sizeof(SDropTableStmt)); case QUERY_NODE_DROP_SUPER_TABLE_STMT: - return sizeof(SDropSuperTableStmt); + return makeNode(type, sizeof(SDropSuperTableStmt)); case QUERY_NODE_ALTER_TABLE_STMT: - return sizeof(SAlterTableStmt); + return makeNode(type, sizeof(SAlterTableStmt)); case QUERY_NODE_CREATE_USER_STMT: - return sizeof(SCreateUserStmt); + return 
makeNode(type, sizeof(SCreateUserStmt)); case QUERY_NODE_ALTER_USER_STMT: - return sizeof(SAlterUserStmt); + return makeNode(type, sizeof(SAlterUserStmt)); case QUERY_NODE_DROP_USER_STMT: - return sizeof(SDropUserStmt); + return makeNode(type, sizeof(SDropUserStmt)); case QUERY_NODE_USE_DATABASE_STMT: - return sizeof(SUseDatabaseStmt); + return makeNode(type, sizeof(SUseDatabaseStmt)); case QUERY_NODE_CREATE_DNODE_STMT: - return sizeof(SCreateDnodeStmt); + return makeNode(type, sizeof(SCreateDnodeStmt)); case QUERY_NODE_DROP_DNODE_STMT: - return sizeof(SDropDnodeStmt); + return makeNode(type, sizeof(SDropDnodeStmt)); case QUERY_NODE_ALTER_DNODE_STMT: - return sizeof(SAlterDnodeStmt); + return makeNode(type, sizeof(SAlterDnodeStmt)); case QUERY_NODE_CREATE_INDEX_STMT: - return sizeof(SCreateIndexStmt); + return makeNode(type, sizeof(SCreateIndexStmt)); case QUERY_NODE_DROP_INDEX_STMT: - return sizeof(SDropIndexStmt); + return makeNode(type, sizeof(SDropIndexStmt)); case QUERY_NODE_CREATE_QNODE_STMT: case QUERY_NODE_CREATE_BNODE_STMT: case QUERY_NODE_CREATE_SNODE_STMT: case QUERY_NODE_CREATE_MNODE_STMT: - return sizeof(SCreateComponentNodeStmt); + return makeNode(type, sizeof(SCreateComponentNodeStmt)); case QUERY_NODE_DROP_QNODE_STMT: case QUERY_NODE_DROP_BNODE_STMT: case QUERY_NODE_DROP_SNODE_STMT: case QUERY_NODE_DROP_MNODE_STMT: - return sizeof(SDropComponentNodeStmt); + return makeNode(type, sizeof(SDropComponentNodeStmt)); case QUERY_NODE_CREATE_TOPIC_STMT: - return sizeof(SCreateTopicStmt); + return makeNode(type, sizeof(SCreateTopicStmt)); case QUERY_NODE_DROP_TOPIC_STMT: - return sizeof(SDropTopicStmt); + return makeNode(type, sizeof(SDropTopicStmt)); + case QUERY_NODE_DROP_CGROUP_STMT: + return makeNode(type, sizeof(SDropCGroupStmt)); case QUERY_NODE_EXPLAIN_STMT: - return sizeof(SExplainStmt); + return makeNode(type, sizeof(SExplainStmt)); case QUERY_NODE_DESCRIBE_STMT: - return sizeof(SDescribeStmt); + return makeNode(type, sizeof(SDescribeStmt)); case QUERY_NODE_RESET_QUERY_CACHE_STMT: - return sizeof(SNode); + return makeNode(type, sizeof(SNode)); case QUERY_NODE_COMPACT_STMT: break; case QUERY_NODE_CREATE_FUNCTION_STMT: - return sizeof(SCreateFunctionStmt); + return makeNode(type, sizeof(SCreateFunctionStmt)); case QUERY_NODE_DROP_FUNCTION_STMT: - return sizeof(SDropFunctionStmt); + return makeNode(type, sizeof(SDropFunctionStmt)); case QUERY_NODE_CREATE_STREAM_STMT: - return sizeof(SCreateStreamStmt); + return makeNode(type, sizeof(SCreateStreamStmt)); case QUERY_NODE_DROP_STREAM_STMT: - return sizeof(SDropStreamStmt); + return makeNode(type, sizeof(SDropStreamStmt)); case QUERY_NODE_MERGE_VGROUP_STMT: case QUERY_NODE_REDISTRIBUTE_VGROUP_STMT: case QUERY_NODE_SPLIT_VGROUP_STMT: case QUERY_NODE_SYNCDB_STMT: break; case QUERY_NODE_GRANT_STMT: - return sizeof(SGrantStmt); + return makeNode(type, sizeof(SGrantStmt)); case QUERY_NODE_REVOKE_STMT: - return sizeof(SRevokeStmt); + return makeNode(type, sizeof(SRevokeStmt)); case QUERY_NODE_SHOW_DNODES_STMT: case QUERY_NODE_SHOW_MNODES_STMT: case QUERY_NODE_SHOW_MODULES_STMT: @@ -192,89 +205,82 @@ int32_t nodesNodeSize(ENodeType type) { case QUERY_NODE_SHOW_CREATE_TABLE_STMT: case QUERY_NODE_SHOW_CREATE_STABLE_STMT: case QUERY_NODE_SHOW_TRANSACTIONS_STMT: - return sizeof(SShowStmt); + return makeNode(type, sizeof(SShowStmt)); case QUERY_NODE_KILL_CONNECTION_STMT: case QUERY_NODE_KILL_QUERY_STMT: case QUERY_NODE_KILL_TRANSACTION_STMT: - return sizeof(SKillStmt); + return makeNode(type, sizeof(SKillStmt)); case 
QUERY_NODE_LOGIC_PLAN_SCAN: - return sizeof(SScanLogicNode); + return makeNode(type, sizeof(SScanLogicNode)); case QUERY_NODE_LOGIC_PLAN_JOIN: - return sizeof(SJoinLogicNode); + return makeNode(type, sizeof(SJoinLogicNode)); case QUERY_NODE_LOGIC_PLAN_AGG: - return sizeof(SAggLogicNode); + return makeNode(type, sizeof(SAggLogicNode)); case QUERY_NODE_LOGIC_PLAN_PROJECT: - return sizeof(SProjectLogicNode); + return makeNode(type, sizeof(SProjectLogicNode)); case QUERY_NODE_LOGIC_PLAN_VNODE_MODIF: - return sizeof(SVnodeModifLogicNode); + return makeNode(type, sizeof(SVnodeModifLogicNode)); case QUERY_NODE_LOGIC_PLAN_EXCHANGE: - return sizeof(SExchangeLogicNode); + return makeNode(type, sizeof(SExchangeLogicNode)); case QUERY_NODE_LOGIC_PLAN_WINDOW: - return sizeof(SWindowLogicNode); + return makeNode(type, sizeof(SWindowLogicNode)); case QUERY_NODE_LOGIC_PLAN_FILL: - return sizeof(SFillLogicNode); + return makeNode(type, sizeof(SFillLogicNode)); case QUERY_NODE_LOGIC_PLAN_SORT: - return sizeof(SSortLogicNode); + return makeNode(type, sizeof(SSortLogicNode)); case QUERY_NODE_LOGIC_PLAN_PARTITION: - return sizeof(SPartitionLogicNode); + return makeNode(type, sizeof(SPartitionLogicNode)); case QUERY_NODE_LOGIC_SUBPLAN: - return sizeof(SLogicSubplan); + return makeNode(type, sizeof(SLogicSubplan)); case QUERY_NODE_LOGIC_PLAN: - return sizeof(SQueryLogicPlan); + return makeNode(type, sizeof(SQueryLogicPlan)); case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: - return sizeof(STagScanPhysiNode); + return makeNode(type, sizeof(STagScanPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN: - return sizeof(STableScanPhysiNode); + return makeNode(type, sizeof(STableScanPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN: - return sizeof(STableSeqScanPhysiNode); + return makeNode(type, sizeof(STableSeqScanPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN: - return sizeof(SStreamScanPhysiNode); + return makeNode(type, sizeof(SStreamScanPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN: - return sizeof(SSystemTableScanPhysiNode); + return makeNode(type, sizeof(SSystemTableScanPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_PROJECT: - return sizeof(SProjectPhysiNode); + return makeNode(type, sizeof(SProjectPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_JOIN: - return sizeof(SJoinPhysiNode); + return makeNode(type, sizeof(SJoinPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_AGG: - return sizeof(SAggPhysiNode); + return makeNode(type, sizeof(SAggPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE: - return sizeof(SExchangePhysiNode); + return makeNode(type, sizeof(SExchangePhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_SORT: - return sizeof(SSortPhysiNode); + return makeNode(type, sizeof(SSortPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_INTERVAL: - return sizeof(SIntervalPhysiNode); + return makeNode(type, sizeof(SIntervalPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL: - return sizeof(SStreamIntervalPhysiNode); + return makeNode(type, sizeof(SStreamIntervalPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_FILL: - return sizeof(SFillPhysiNode); + return makeNode(type, sizeof(SFillPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW: - return sizeof(SSessionWinodwPhysiNode); + return makeNode(type, sizeof(SSessionWinodwPhysiNode)); + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW: + return makeNode(type, sizeof(SStreamSessionWinodwPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW: - return sizeof(SStateWinodwPhysiNode); + return makeNode(type, sizeof(SStateWinodwPhysiNode)); case 
QUERY_NODE_PHYSICAL_PLAN_PARTITION: - return sizeof(SPartitionPhysiNode); + return makeNode(type, sizeof(SPartitionPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_DISPATCH: - return sizeof(SDataDispatcherNode); + return makeNode(type, sizeof(SDataDispatcherNode)); case QUERY_NODE_PHYSICAL_PLAN_INSERT: - return sizeof(SDataInserterNode); + return makeNode(type, sizeof(SDataInserterNode)); case QUERY_NODE_PHYSICAL_SUBPLAN: - return sizeof(SSubplan); + return makeNode(type, sizeof(SSubplan)); case QUERY_NODE_PHYSICAL_PLAN: - return sizeof(SQueryPlan); + return makeNode(type, sizeof(SQueryPlan)); default: break; } nodesError("nodesMakeNode unknown node = %s", nodesNodeName(type)); - return 0; -} - -SNodeptr nodesMakeNode(ENodeType type) { - SNode* p = taosMemoryCalloc(1, nodesNodeSize(type)); - if (NULL == p) { - return NULL; - } - setNodeType(p, type); - return p; + return NULL; } static void destroyVgDataBlockArray(SArray* pArray) { @@ -662,6 +668,7 @@ void nodesDestroyNode(SNodeptr pNode) { destroyWinodwPhysiNode((SWinodwPhysiNode*)pNode); break; case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW: destroyWinodwPhysiNode((SWinodwPhysiNode*)pNode); break; case QUERY_NODE_PHYSICAL_PLAN_DISPATCH: @@ -1117,6 +1124,7 @@ bool nodesIsComparisonOp(const SOperatorNode* pOp) { bool nodesIsJsonOp(const SOperatorNode* pOp) { switch (pOp->opType) { case OP_TYPE_JSON_GET_VALUE: + case OP_TYPE_JSON_CONTAINS: return true; default: break; diff --git a/source/libs/parser/inc/parAst.h b/source/libs/parser/inc/parAst.h index fc096a057c3bbe71ce844e1ac82bdde8273862d0..a1c304118bfcdc5078bf0a19b73a8bde17e3c0cf 100644 --- a/source/libs/parser/inc/parAst.h +++ b/source/libs/parser/inc/parAst.h @@ -53,7 +53,8 @@ typedef enum EDatabaseOptionType { DB_OPTION_WAL, DB_OPTION_VGROUPS, DB_OPTION_SINGLE_STABLE, - DB_OPTION_RETENTIONS + DB_OPTION_RETENTIONS, + DB_OPTION_SCHEMALESS } EDatabaseOptionType; typedef enum ETableOptionType { @@ -143,12 +144,12 @@ SNode* createDropTableClause(SAstCreateContext* pCxt, bool ignoreNotExists, SNod SNode* createDropTableStmt(SAstCreateContext* pCxt, SNodeList* pTables); SNode* createDropSuperTableStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SNode* pRealTable); SNode* createAlterTableModifyOptions(SAstCreateContext* pCxt, SNode* pRealTable, SNode* pOptions); -SNode* createAlterTableAddModifyCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, - const SToken* pColName, SDataType dataType); -SNode* createAlterTableDropCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, const SToken* pColName); -SNode* createAlterTableRenameCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, - const SToken* pOldColName, const SToken* pNewColName); -SNode* createAlterTableSetTag(SAstCreateContext* pCxt, SNode* pRealTable, const SToken* pTagName, SNode* pVal); +SNode* createAlterTableAddModifyCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, SToken* pColName, + SDataType dataType); +SNode* createAlterTableDropCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, SToken* pColName); +SNode* createAlterTableRenameCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, SToken* pOldColName, + SToken* pNewColName); +SNode* createAlterTableSetTag(SAstCreateContext* pCxt, SNode* pRealTable, SToken* pTagName, SNode* pVal); SNode* createUseDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName); SNode* createShowStmt(SAstCreateContext* pCxt, ENodeType type, SNode* pDbName, SNode* 
pTbNamePattern); SNode* createShowCreateDatabaseStmt(SAstCreateContext* pCxt, const SToken* pDbName); @@ -169,6 +170,8 @@ SNode* createTopicOptions(SAstCreateContext* pCxt); SNode* createCreateTopicStmt(SAstCreateContext* pCxt, bool ignoreExists, const SToken* pTopicName, SNode* pQuery, const SToken* pSubscribeDbName, SNode* pOptions); SNode* createDropTopicStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pTopicName); +SNode* createDropCGroupStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pCGroupId, + const SToken* pTopicName); SNode* createAlterLocalStmt(SAstCreateContext* pCxt, const SToken* pConfig, const SToken* pValue); SNode* createDefaultExplainOptions(SAstCreateContext* pCxt); SNode* setExplainVerbose(SAstCreateContext* pCxt, SNode* pOptions, const SToken* pVal); diff --git a/source/libs/parser/inc/parInsertData.h b/source/libs/parser/inc/parInsertData.h index e19f54dff36a696665d09796dc78eb0b6ca34068..aeebf51c96efa271799a66e9223065d4fd0314b9 100644 --- a/source/libs/parser/inc/parInsertData.h +++ b/source/libs/parser/inc/parInsertData.h @@ -94,7 +94,7 @@ static FORCE_INLINE void getSTSRowAppendInfo(uint8_t rowType, SParsedDataColInfo col_id_t *colIdx) { col_id_t schemaIdx = 0; if (IS_DATA_COL_ORDERED(spd)) { - schemaIdx = spd->boundColumns[idx] - PRIMARYKEY_TIMESTAMP_COL_ID; + schemaIdx = spd->boundColumns[idx]; if (TD_IS_TP_ROW_T(rowType)) { *toffset = (spd->cols + schemaIdx)->toffset; // the offset of firstPart *colIdx = schemaIdx; @@ -104,7 +104,7 @@ static FORCE_INLINE void getSTSRowAppendInfo(uint8_t rowType, SParsedDataColInfo } } else { ASSERT(idx == (spd->colIdxInfo + idx)->boundIdx); - schemaIdx = (spd->colIdxInfo + idx)->schemaColIdx - PRIMARYKEY_TIMESTAMP_COL_ID; + schemaIdx = (spd->colIdxInfo + idx)->schemaColIdx; if (TD_IS_TP_ROW_T(rowType)) { *toffset = (spd->cols + schemaIdx)->toffset; *colIdx = schemaIdx; @@ -133,14 +133,15 @@ static FORCE_INLINE int32_t setBlockInfo(SSubmitBlk *pBlocks, STableDataBlocks * int32_t schemaIdxCompar(const void *lhs, const void *rhs); int32_t boundIdxCompar(const void *lhs, const void *rhs); void setBoundColumnInfo(SParsedDataColInfo *pColList, SSchema *pSchema, col_id_t numOfCols); -void destroyBlockArrayList(SArray* pDataBlockList); -void destroyBlockHashmap(SHashObj* pDataBlockHash); -int initRowBuilder(SRowBuilder *pBuilder, int16_t schemaVer, SParsedDataColInfo *pColInfo); -int32_t allocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows); -int32_t getDataBlockFromList(SHashObj* pHashList, void* id, int32_t idLen, int32_t size, int32_t startOffset, int32_t rowSize, - STableMeta* pTableMeta, STableDataBlocks** dataBlocks, SArray* pBlockList, SVCreateTbReq* pCreateTbReq); -int32_t mergeTableDataBlocks(SHashObj* pHashObj, uint8_t payloadType, SArray** pVgDataBlocks); -int32_t buildCreateTbMsg(STableDataBlocks* pBlocks, SVCreateTbReq* pCreateTbReq); +void destroyBlockArrayList(SArray *pDataBlockList); +void destroyBlockHashmap(SHashObj *pDataBlockHash); +int initRowBuilder(SRowBuilder *pBuilder, int16_t schemaVer, SParsedDataColInfo *pColInfo); +int32_t allocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t *numOfRows); +int32_t getDataBlockFromList(SHashObj *pHashList, void *id, int32_t idLen, int32_t size, int32_t startOffset, + int32_t rowSize, STableMeta *pTableMeta, STableDataBlocks **dataBlocks, SArray *pBlockList, + SVCreateTbReq *pCreateTbReq); +int32_t mergeTableDataBlocks(SHashObj *pHashObj, uint8_t payloadType, SArray **pVgDataBlocks); +int32_t 
buildCreateTbMsg(STableDataBlocks *pBlocks, SVCreateTbReq *pCreateTbReq); int32_t allocateMemForSize(STableDataBlocks *pDataBlock, int32_t allSize); diff --git a/source/libs/parser/inc/parInt.h b/source/libs/parser/inc/parInt.h index 2ad1ebc1121d96f243fff9d55980b26bffdf6c04..3efe6700d2339b2234d33f868c0b42fa993d1b64 100644 --- a/source/libs/parser/inc/parInt.h +++ b/source/libs/parser/inc/parInt.h @@ -24,8 +24,10 @@ extern "C" { #include "parUtil.h" #include "parser.h" +int32_t parseInsertSyntax(SParseContext* pContext, SQuery** pQuery); int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery); int32_t parse(SParseContext* pParseCxt, SQuery** pQuery); +int32_t collectMetaKey(SParseContext* pParseCxt, SQuery* pQuery); int32_t authenticate(SParseContext* pParseCxt, SQuery* pQuery); int32_t translate(SParseContext* pParseCxt, SQuery* pQuery); int32_t extractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema); diff --git a/source/libs/parser/inc/parUtil.h b/source/libs/parser/inc/parUtil.h index f82d29d27eeb2f8b80baf56ff7c065025abcc3b5..2468f7d75bd16acb21c2bb45dd74ae46c1b669d7 100644 --- a/source/libs/parser/inc/parUtil.h +++ b/source/libs/parser/inc/parUtil.h @@ -20,15 +20,16 @@ extern "C" { #endif +#include "catalog.h" #include "os.h" #include "query.h" -#define parserFatal(param, ...) qFatal("PARSER: " param, __VA_ARGS__) -#define parserError(param, ...) qError("PARSER: " param, __VA_ARGS__) -#define parserWarn(param, ...) qWarn("PARSER: " param, __VA_ARGS__) -#define parserInfo(param, ...) qInfo("PARSER: " param, __VA_ARGS__) -#define parserDebug(param, ...) qDebug("PARSER: " param, __VA_ARGS__) -#define parserTrace(param, ...) qTrace("PARSER: " param, __VA_ARGS__) +#define parserFatal(param, ...) qFatal("PARSER: " param, ##__VA_ARGS__) +#define parserError(param, ...) qError("PARSER: " param, ##__VA_ARGS__) +#define parserWarn(param, ...) qWarn("PARSER: " param, ##__VA_ARGS__) +#define parserInfo(param, ...) qInfo("PARSER: " param, ##__VA_ARGS__) +#define parserDebug(param, ...) qDebug("PARSER: " param, ##__VA_ARGS__) +#define parserTrace(param, ...) 
qTrace("PARSER: " param, ##__VA_ARGS__) #define PK_TS_COL_INTERNAL_NAME "_rowts" @@ -37,6 +38,16 @@ typedef struct SMsgBuf { char* buf; } SMsgBuf; +typedef struct SParseMetaCache { + SHashObj* pTableMeta; // key is tbFName, element is STableMeta* + SHashObj* pDbVgroup; // key is dbFName, element is SArray* + SHashObj* pTableVgroup; // key is tbFName, element is SVgroupInfo* + SHashObj* pDbCfg; // key is tbFName, element is SDbCfgInfo* + SHashObj* pDbInfo; // key is tbFName, element is SDbInfo* + SHashObj* pUserAuth; // key is SUserAuthInfo serialized string, element is bool indicating whether or not to pass + SHashObj* pUdf; // key is funcName, element is SFuncInfo* +} SParseMetaCache; + int32_t generateSyntaxErrMsg(SMsgBuf* pBuf, int32_t errCode, ...); int32_t buildInvalidOperationMsg(SMsgBuf* pMsgBuf, const char* msg); int32_t buildSyntaxErrMsg(SMsgBuf* pBuf, const char* additionalInfo, const char* sourceStr); @@ -47,10 +58,33 @@ int32_t getNumOfColumns(const STableMeta* pTableMeta); int32_t getNumOfTags(const STableMeta* pTableMeta); STableComInfo getTableInfo(const STableMeta* pTableMeta); STableMeta* tableMetaDup(const STableMeta* pTableMeta); -int parseJsontoTagData(const char* json, SKVRowBuilder* kvRowBuilder, SMsgBuf* errMsg, int16_t startColId); +int32_t parseJsontoTagData(const char* json, SKVRowBuilder* kvRowBuilder, SMsgBuf* errMsg, int16_t startColId); int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen); +int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq); +int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache); +int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache); +int32_t reserveTableMetaInCacheExt(const SName* pName, SParseMetaCache* pMetaCache); +int32_t reserveDbVgInfoInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache); +int32_t reserveTableVgroupInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache); +int32_t reserveTableVgroupInCacheExt(const SName* pName, SParseMetaCache* pMetaCache); +int32_t reserveDbVgVersionInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache); +int32_t reserveDbCfgInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache); +int32_t reserveUserAuthInCache(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type, + SParseMetaCache* pMetaCache); +int32_t reserveUserAuthInCacheExt(const char* pUser, const SName* pName, AUTH_TYPE type, SParseMetaCache* pMetaCache); +int32_t reserveUdfInCache(const char* pFunc, SParseMetaCache* pMetaCache); +int32_t getTableMetaFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableMeta** pMeta); +int32_t getDbVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo); +int32_t getTableVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup); +int32_t getDbVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId, + int32_t* pTableNum); +int32_t getDbCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo); +int32_t getUserAuthFromCache(SParseMetaCache* pMetaCache, const char* pUser, const char* pDbFName, AUTH_TYPE type, + bool* pPass); +int32_t getUdfInfoFromCache(SParseMetaCache* pMetaCache, const char* pFunc, SFuncInfo* pInfo); + #ifdef __cplusplus } #endif diff --git a/source/libs/parser/inc/sql.y 
b/source/libs/parser/inc/sql.y index 2cba1eb043e0e4063b4a7519a3c116820412c69d..1fb60f83a5a822e627f8cbdf54b3a1e42c4daa5d 100644 --- a/source/libs/parser/inc/sql.y +++ b/source/libs/parser/inc/sql.y @@ -180,6 +180,7 @@ db_options(A) ::= db_options(B) WAL NK_INTEGER(C). db_options(A) ::= db_options(B) VGROUPS NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_VGROUPS, &C); } db_options(A) ::= db_options(B) SINGLE_STABLE NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_SINGLE_STABLE, &C); } db_options(A) ::= db_options(B) RETENTIONS retention_list(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_RETENTIONS, C); } +db_options(A) ::= db_options(B) SCHEMALESS NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_SCHEMALESS, &C); } alter_db_options(A) ::= alter_db_option(B). { A = createAlterDatabaseOptions(pCxt); A = setAlterDatabaseOption(pCxt, A, &B); } alter_db_options(A) ::= alter_db_options(B) alter_db_option(C). { A = setAlterDatabaseOption(pCxt, B, &C); } @@ -407,6 +408,7 @@ cmd ::= CREATE TOPIC not_exists_opt(A) cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B) topic_options(D) AS db_name(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, NULL, &C, D); } cmd ::= DROP TOPIC exists_opt(A) topic_name(B). { pCxt->pRootNode = createDropTopicStmt(pCxt, A, &B); } +cmd ::= DROP CGROUP exists_opt(A) cgroup_name(B) ON topic_name(C). { pCxt->pRootNode = createDropCGroupStmt(pCxt, A, &B, &C); } topic_options(A) ::= . { A = createTopicOptions(pCxt); } topic_options(A) ::= topic_options(B) WITH TABLE. { ((STopicOptions*)B)->withTable = true; A = B; } @@ -565,6 +567,10 @@ topic_name(A) ::= NK_ID(B). %destructor stream_name { } stream_name(A) ::= NK_ID(B). { A = B; } +%type cgroup_name { SToken } +%destructor cgroup_name { } +cgroup_name(A) ::= NK_ID(B). { A = B; } + /************************************************ expression **********************************************************/ expression(A) ::= literal(B). { A = B; } expression(A) ::= pseudo_column(B). 
{ A = B; } diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index 80c4593d9bb683788953d0410f654b813d0c126a..836a0cb520684e264cecb3cd6425ae3c7688de68 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -29,17 +29,17 @@ } \ } while (0) -#define CHECK_RAW_EXPR_NODE(node) \ - do { \ - if (NULL == (node) || QUERY_NODE_RAW_EXPR != nodeType(node)) { \ - pCxt->errCode = TSDB_CODE_PAR_SYNTAX_ERROR; \ - return NULL; \ - } \ +#define CHECK_PARSER_STATUS(pCxt) \ + do { \ + if (TSDB_CODE_SUCCESS != pCxt->errCode) { \ + return NULL; \ + } \ } while (0) SToken nil_token = {.type = TK_NK_NIL, .n = 0, .z = NULL}; void initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt) { + memset(pCxt, 0, sizeof(SAstCreateContext)); pCxt->pQueryCxt = pParseCxt; pCxt->msgBuf.buf = pParseCxt->pMsg; pCxt->msgBuf.len = pParseCxt->msgLen; @@ -206,6 +206,7 @@ static bool checkComment(SAstCreateContext* pCxt, const SToken* pCommentToken, b } SNode* createRawExprNode(SAstCreateContext* pCxt, const SToken* pToken, SNode* pNode) { + CHECK_PARSER_STATUS(pCxt); SRawExprNode* target = (SRawExprNode*)nodesMakeNode(QUERY_NODE_RAW_EXPR); CHECK_OUT_OF_MEM(target); target->p = pToken->z; @@ -215,6 +216,7 @@ SNode* createRawExprNode(SAstCreateContext* pCxt, const SToken* pToken, SNode* p } SNode* createRawExprNodeExt(SAstCreateContext* pCxt, const SToken* pStart, const SToken* pEnd, SNode* pNode) { + CHECK_PARSER_STATUS(pCxt); SRawExprNode* target = (SRawExprNode*)nodesMakeNode(QUERY_NODE_RAW_EXPR); CHECK_OUT_OF_MEM(target); target->p = pStart->z; @@ -224,7 +226,7 @@ SNode* createRawExprNodeExt(SAstCreateContext* pCxt, const SToken* pStart, const } SNode* releaseRawExprNode(SAstCreateContext* pCxt, SNode* pNode) { - CHECK_RAW_EXPR_NODE(pNode); + CHECK_PARSER_STATUS(pCxt); SRawExprNode* pRawExpr = (SRawExprNode*)pNode; SNode* pExpr = pRawExpr->pNode; if (nodesIsExprNode(pExpr)) { @@ -247,6 +249,7 @@ SToken getTokenFromRawExprNode(SAstCreateContext* pCxt, SNode* pNode) { } SNodeList* createNodeList(SAstCreateContext* pCxt, SNode* pNode) { + CHECK_PARSER_STATUS(pCxt); SNodeList* list = nodesMakeList(); CHECK_OUT_OF_MEM(list); pCxt->errCode = nodesListAppend(list, pNode); @@ -254,11 +257,13 @@ SNodeList* createNodeList(SAstCreateContext* pCxt, SNode* pNode) { } SNodeList* addNodeToList(SAstCreateContext* pCxt, SNodeList* pList, SNode* pNode) { + CHECK_PARSER_STATUS(pCxt); pCxt->errCode = nodesListAppend(pList, pNode); return pList; } SNode* createColumnNode(SAstCreateContext* pCxt, SToken* pTableAlias, SToken* pColumnName) { + CHECK_PARSER_STATUS(pCxt); if (!checkTableName(pCxt, pTableAlias) || !checkColumnName(pCxt, pColumnName)) { return NULL; } @@ -272,6 +277,7 @@ SNode* createColumnNode(SAstCreateContext* pCxt, SToken* pTableAlias, SToken* pC } SNode* createValueNode(SAstCreateContext* pCxt, int32_t dataType, const SToken* pLiteral) { + CHECK_PARSER_STATUS(pCxt); SValueNode* val = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); CHECK_OUT_OF_MEM(val); val->literal = strndup(pLiteral->z, pLiteral->n); @@ -291,6 +297,7 @@ SNode* createValueNode(SAstCreateContext* pCxt, int32_t dataType, const SToken* } SNode* createDurationValueNode(SAstCreateContext* pCxt, const SToken* pLiteral) { + CHECK_PARSER_STATUS(pCxt); SValueNode* val = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); CHECK_OUT_OF_MEM(val); val->literal = strndup(pLiteral->z, pLiteral->n); @@ -304,6 +311,7 @@ SNode* createDurationValueNode(SAstCreateContext* pCxt, const SToken* 
pLiteral) } SNode* createDefaultDatabaseCondValue(SAstCreateContext* pCxt) { + CHECK_PARSER_STATUS(pCxt); if (NULL == pCxt->pQueryCxt->db) { return NULL; } @@ -321,6 +329,7 @@ SNode* createDefaultDatabaseCondValue(SAstCreateContext* pCxt) { } SNode* createPlaceholderValueNode(SAstCreateContext* pCxt, const SToken* pLiteral) { + CHECK_PARSER_STATUS(pCxt); SValueNode* val = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); CHECK_OUT_OF_MEM(val); val->literal = strndup(pLiteral->z, pLiteral->n); @@ -338,34 +347,30 @@ SNode* createPlaceholderValueNode(SAstCreateContext* pCxt, const SToken* pLitera } SNode* createLogicConditionNode(SAstCreateContext* pCxt, ELogicConditionType type, SNode* pParam1, SNode* pParam2) { + CHECK_PARSER_STATUS(pCxt); SLogicConditionNode* cond = (SLogicConditionNode*)nodesMakeNode(QUERY_NODE_LOGIC_CONDITION); CHECK_OUT_OF_MEM(cond); cond->condType = type; cond->pParameterList = nodesMakeList(); - if ((QUERY_NODE_LOGIC_CONDITION == nodeType(pParam1) && type != ((SLogicConditionNode*)pParam1)->condType) || - (QUERY_NODE_LOGIC_CONDITION == nodeType(pParam2) && type != ((SLogicConditionNode*)pParam2)->condType)) { + if (QUERY_NODE_LOGIC_CONDITION == nodeType(pParam1) && type == ((SLogicConditionNode*)pParam1)->condType) { + nodesListAppendList(cond->pParameterList, ((SLogicConditionNode*)pParam1)->pParameterList); + ((SLogicConditionNode*)pParam1)->pParameterList = NULL; + nodesDestroyNode(pParam1); + } else { nodesListAppend(cond->pParameterList, pParam1); - nodesListAppend(cond->pParameterList, pParam2); + } + if (QUERY_NODE_LOGIC_CONDITION == nodeType(pParam2) && type == ((SLogicConditionNode*)pParam2)->condType) { + nodesListAppendList(cond->pParameterList, ((SLogicConditionNode*)pParam2)->pParameterList); + ((SLogicConditionNode*)pParam2)->pParameterList = NULL; + nodesDestroyNode(pParam2); } else { - if (QUERY_NODE_LOGIC_CONDITION == nodeType(pParam1)) { - nodesListAppendList(cond->pParameterList, ((SLogicConditionNode*)pParam1)->pParameterList); - ((SLogicConditionNode*)pParam1)->pParameterList = NULL; - nodesDestroyNode(pParam1); - } else { - nodesListAppend(cond->pParameterList, pParam1); - } - if (QUERY_NODE_LOGIC_CONDITION == nodeType(pParam2)) { - nodesListAppendList(cond->pParameterList, ((SLogicConditionNode*)pParam2)->pParameterList); - ((SLogicConditionNode*)pParam2)->pParameterList = NULL; - nodesDestroyNode(pParam2); - } else { - nodesListAppend(cond->pParameterList, pParam2); - } + nodesListAppend(cond->pParameterList, pParam2); } return (SNode*)cond; } SNode* createOperatorNode(SAstCreateContext* pCxt, EOperatorType type, SNode* pLeft, SNode* pRight) { + CHECK_PARSER_STATUS(pCxt); SOperatorNode* op = (SOperatorNode*)nodesMakeNode(QUERY_NODE_OPERATOR); CHECK_OUT_OF_MEM(op); op->opType = type; @@ -375,17 +380,20 @@ SNode* createOperatorNode(SAstCreateContext* pCxt, EOperatorType type, SNode* pL } SNode* createBetweenAnd(SAstCreateContext* pCxt, SNode* pExpr, SNode* pLeft, SNode* pRight) { + CHECK_PARSER_STATUS(pCxt); return createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND, createOperatorNode(pCxt, OP_TYPE_GREATER_EQUAL, pExpr, pLeft), createOperatorNode(pCxt, OP_TYPE_LOWER_EQUAL, nodesCloneNode(pExpr), pRight)); } SNode* createNotBetweenAnd(SAstCreateContext* pCxt, SNode* pExpr, SNode* pLeft, SNode* pRight) { + CHECK_PARSER_STATUS(pCxt); return createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, createOperatorNode(pCxt, OP_TYPE_LOWER_THAN, pExpr, pLeft), createOperatorNode(pCxt, OP_TYPE_GREATER_THAN, nodesCloneNode(pExpr), pRight)); } static SNode* 
createPrimaryKeyCol(SAstCreateContext* pCxt) { + CHECK_PARSER_STATUS(pCxt); SColumnNode* pCol = nodesMakeNode(QUERY_NODE_COLUMN); CHECK_OUT_OF_MEM(pCol); pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID; @@ -394,6 +402,7 @@ static SNode* createPrimaryKeyCol(SAstCreateContext* pCxt) { } SNode* createFunctionNode(SAstCreateContext* pCxt, const SToken* pFuncName, SNodeList* pParameterList) { + CHECK_PARSER_STATUS(pCxt); if (0 == strncasecmp("_rowts", pFuncName->z, pFuncName->n) || 0 == strncasecmp("_c0", pFuncName->z, pFuncName->n)) { return createPrimaryKeyCol(pCxt); } @@ -405,6 +414,7 @@ SNode* createFunctionNode(SAstCreateContext* pCxt, const SToken* pFuncName, SNod } SNode* createCastFunctionNode(SAstCreateContext* pCxt, SNode* pExpr, SDataType dt) { + CHECK_PARSER_STATUS(pCxt); SFunctionNode* func = (SFunctionNode*)nodesMakeNode(QUERY_NODE_FUNCTION); CHECK_OUT_OF_MEM(func); strcpy(func->functionName, "cast"); @@ -419,6 +429,7 @@ SNode* createCastFunctionNode(SAstCreateContext* pCxt, SNode* pExpr, SDataType d } SNode* createNodeListNode(SAstCreateContext* pCxt, SNodeList* pList) { + CHECK_PARSER_STATUS(pCxt); SNodeListNode* list = (SNodeListNode*)nodesMakeNode(QUERY_NODE_NODE_LIST); CHECK_OUT_OF_MEM(list); list->pNodeList = pList; @@ -426,6 +437,7 @@ SNode* createNodeListNode(SAstCreateContext* pCxt, SNodeList* pList) { } SNode* createNodeListNodeEx(SAstCreateContext* pCxt, SNode* p1, SNode* p2) { + CHECK_PARSER_STATUS(pCxt); SNodeListNode* list = (SNodeListNode*)nodesMakeNode(QUERY_NODE_NODE_LIST); CHECK_OUT_OF_MEM(list); list->pNodeList = nodesMakeList(); @@ -436,6 +448,7 @@ SNode* createNodeListNodeEx(SAstCreateContext* pCxt, SNode* p1, SNode* p2) { } SNode* createRealTableNode(SAstCreateContext* pCxt, SToken* pDbName, SToken* pTableName, SToken* pTableAlias) { + CHECK_PARSER_STATUS(pCxt); if (!checkDbName(pCxt, pDbName, true) || !checkTableName(pCxt, pTableName) || !checkTableName(pCxt, pTableAlias)) { return NULL; } @@ -456,6 +469,7 @@ SNode* createRealTableNode(SAstCreateContext* pCxt, SToken* pDbName, SToken* pTa } SNode* createTempTableNode(SAstCreateContext* pCxt, SNode* pSubquery, const SToken* pTableAlias) { + CHECK_PARSER_STATUS(pCxt); STempTableNode* tempTable = (STempTableNode*)nodesMakeNode(QUERY_NODE_TEMP_TABLE); CHECK_OUT_OF_MEM(tempTable); tempTable->pSubquery = pSubquery; @@ -473,6 +487,7 @@ SNode* createTempTableNode(SAstCreateContext* pCxt, SNode* pSubquery, const STok } SNode* createJoinTableNode(SAstCreateContext* pCxt, EJoinType type, SNode* pLeft, SNode* pRight, SNode* pJoinCond) { + CHECK_PARSER_STATUS(pCxt); SJoinTableNode* joinTable = (SJoinTableNode*)nodesMakeNode(QUERY_NODE_JOIN_TABLE); CHECK_OUT_OF_MEM(joinTable); joinTable->joinType = type; @@ -483,6 +498,7 @@ SNode* createJoinTableNode(SAstCreateContext* pCxt, EJoinType type, SNode* pLeft } SNode* createLimitNode(SAstCreateContext* pCxt, const SToken* pLimit, const SToken* pOffset) { + CHECK_PARSER_STATUS(pCxt); SLimitNode* limitNode = (SLimitNode*)nodesMakeNode(QUERY_NODE_LIMIT); CHECK_OUT_OF_MEM(limitNode); limitNode->limit = taosStr2Int64(pLimit->z, NULL, 10); @@ -493,6 +509,7 @@ SNode* createLimitNode(SAstCreateContext* pCxt, const SToken* pLimit, const STok } SNode* createOrderByExprNode(SAstCreateContext* pCxt, SNode* pExpr, EOrder order, ENullOrder nullOrder) { + CHECK_PARSER_STATUS(pCxt); SOrderByExprNode* orderByExpr = (SOrderByExprNode*)nodesMakeNode(QUERY_NODE_ORDER_BY_EXPR); CHECK_OUT_OF_MEM(orderByExpr); orderByExpr->pExpr = pExpr; @@ -505,6 +522,7 @@ SNode* createOrderByExprNode(SAstCreateContext* 
pCxt, SNode* pExpr, EOrder order } SNode* createSessionWindowNode(SAstCreateContext* pCxt, SNode* pCol, SNode* pGap) { + CHECK_PARSER_STATUS(pCxt); SSessionWindowNode* session = (SSessionWindowNode*)nodesMakeNode(QUERY_NODE_SESSION_WINDOW); CHECK_OUT_OF_MEM(session); session->pCol = (SColumnNode*)pCol; @@ -513,6 +531,7 @@ SNode* createSessionWindowNode(SAstCreateContext* pCxt, SNode* pCol, SNode* pGap } SNode* createStateWindowNode(SAstCreateContext* pCxt, SNode* pExpr) { + CHECK_PARSER_STATUS(pCxt); SStateWindowNode* state = (SStateWindowNode*)nodesMakeNode(QUERY_NODE_STATE_WINDOW); CHECK_OUT_OF_MEM(state); state->pCol = createPrimaryKeyCol(pCxt); @@ -526,6 +545,7 @@ SNode* createStateWindowNode(SAstCreateContext* pCxt, SNode* pExpr) { SNode* createIntervalWindowNode(SAstCreateContext* pCxt, SNode* pInterval, SNode* pOffset, SNode* pSliding, SNode* pFill) { + CHECK_PARSER_STATUS(pCxt); SIntervalWindowNode* interval = (SIntervalWindowNode*)nodesMakeNode(QUERY_NODE_INTERVAL_WINDOW); CHECK_OUT_OF_MEM(interval); interval->pCol = createPrimaryKeyCol(pCxt); @@ -541,6 +561,7 @@ SNode* createIntervalWindowNode(SAstCreateContext* pCxt, SNode* pInterval, SNode } SNode* createFillNode(SAstCreateContext* pCxt, EFillMode mode, SNode* pValues) { + CHECK_PARSER_STATUS(pCxt); SFillNode* fill = (SFillNode*)nodesMakeNode(QUERY_NODE_FILL); CHECK_OUT_OF_MEM(fill); fill->mode = mode; @@ -555,6 +576,7 @@ SNode* createFillNode(SAstCreateContext* pCxt, EFillMode mode, SNode* pValues) { } SNode* createGroupingSetNode(SAstCreateContext* pCxt, SNode* pNode) { + CHECK_PARSER_STATUS(pCxt); SGroupingSetNode* groupingSet = (SGroupingSetNode*)nodesMakeNode(QUERY_NODE_GROUPING_SET); CHECK_OUT_OF_MEM(groupingSet); groupingSet->groupingSetType = GP_TYPE_NORMAL; @@ -564,9 +586,7 @@ SNode* createGroupingSetNode(SAstCreateContext* pCxt, SNode* pNode) { } SNode* setProjectionAlias(SAstCreateContext* pCxt, SNode* pNode, const SToken* pAlias) { - if (NULL == pNode || TSDB_CODE_SUCCESS != pCxt->errCode) { - return pNode; - } + CHECK_PARSER_STATUS(pCxt); int32_t len = TMIN(sizeof(((SExprNode*)pNode)->aliasName) - 1, pAlias->n); strncpy(((SExprNode*)pNode)->aliasName, pAlias->z, len); ((SExprNode*)pNode)->aliasName[len] = '\0'; @@ -576,6 +596,7 @@ SNode* setProjectionAlias(SAstCreateContext* pCxt, SNode* pNode, const SToken* p } SNode* addWhereClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pWhere) { + CHECK_PARSER_STATUS(pCxt); if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) { ((SSelectStmt*)pStmt)->pWhere = pWhere; } @@ -583,6 +604,7 @@ SNode* addWhereClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pWhere) { } SNode* addPartitionByClause(SAstCreateContext* pCxt, SNode* pStmt, SNodeList* pPartitionByList) { + CHECK_PARSER_STATUS(pCxt); if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) { ((SSelectStmt*)pStmt)->pPartitionByList = pPartitionByList; } @@ -590,6 +612,7 @@ SNode* addPartitionByClause(SAstCreateContext* pCxt, SNode* pStmt, SNodeList* pP } SNode* addWindowClauseClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pWindow) { + CHECK_PARSER_STATUS(pCxt); if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) { ((SSelectStmt*)pStmt)->pWindow = pWindow; } @@ -597,6 +620,7 @@ SNode* addWindowClauseClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pWind } SNode* addGroupByClause(SAstCreateContext* pCxt, SNode* pStmt, SNodeList* pGroupByList) { + CHECK_PARSER_STATUS(pCxt); if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) { ((SSelectStmt*)pStmt)->pGroupByList = pGroupByList; } @@ -604,6 +628,7 @@ SNode* 
addGroupByClause(SAstCreateContext* pCxt, SNode* pStmt, SNodeList* pGroup } SNode* addHavingClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pHaving) { + CHECK_PARSER_STATUS(pCxt); if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) { ((SSelectStmt*)pStmt)->pHaving = pHaving; } @@ -611,6 +636,7 @@ SNode* addHavingClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pHaving) { } SNode* addOrderByClause(SAstCreateContext* pCxt, SNode* pStmt, SNodeList* pOrderByList) { + CHECK_PARSER_STATUS(pCxt); if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) { ((SSelectStmt*)pStmt)->pOrderByList = pOrderByList; } @@ -618,6 +644,7 @@ SNode* addOrderByClause(SAstCreateContext* pCxt, SNode* pStmt, SNodeList* pOrder } SNode* addSlimitClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pSlimit) { + CHECK_PARSER_STATUS(pCxt); if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) { ((SSelectStmt*)pStmt)->pSlimit = (SLimitNode*)pSlimit; } @@ -625,6 +652,7 @@ SNode* addSlimitClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pSlimit) { } SNode* addLimitClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pLimit) { + CHECK_PARSER_STATUS(pCxt); if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) { ((SSelectStmt*)pStmt)->pLimit = (SLimitNode*)pLimit; } @@ -632,6 +660,7 @@ SNode* addLimitClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pLimit) { } SNode* createSelectStmt(SAstCreateContext* pCxt, bool isDistinct, SNodeList* pProjectionList, SNode* pTable) { + CHECK_PARSER_STATUS(pCxt); SSelectStmt* select = (SSelectStmt*)nodesMakeNode(QUERY_NODE_SELECT_STMT); CHECK_OUT_OF_MEM(select); select->isDistinct = isDistinct; @@ -643,6 +672,7 @@ SNode* createSelectStmt(SAstCreateContext* pCxt, bool isDistinct, SNodeList* pPr } SNode* createSetOperator(SAstCreateContext* pCxt, ESetOperatorType type, SNode* pLeft, SNode* pRight) { + CHECK_PARSER_STATUS(pCxt); SSetOperator* setOp = (SSetOperator*)nodesMakeNode(QUERY_NODE_SET_OPERATOR); CHECK_OUT_OF_MEM(setOp); setOp->opType = type; @@ -653,6 +683,7 @@ SNode* createSetOperator(SAstCreateContext* pCxt, ESetOperatorType type, SNode* } SNode* createDefaultDatabaseOptions(SAstCreateContext* pCxt) { + CHECK_PARSER_STATUS(pCxt); SDatabaseOptions* pOptions = nodesMakeNode(QUERY_NODE_DATABASE_OPTIONS); CHECK_OUT_OF_MEM(pOptions); pOptions->buffer = TSDB_DEFAULT_BUFFER_PER_VNODE; @@ -673,10 +704,12 @@ SNode* createDefaultDatabaseOptions(SAstCreateContext* pCxt) { pOptions->walLevel = TSDB_DEFAULT_WAL_LEVEL; pOptions->numOfVgroups = TSDB_DEFAULT_VN_PER_DB; pOptions->singleStable = TSDB_DEFAULT_DB_SINGLE_STABLE; + pOptions->schemaless = TSDB_DEFAULT_DB_SCHEMALESS; return (SNode*)pOptions; } SNode* createAlterDatabaseOptions(SAstCreateContext* pCxt) { + CHECK_PARSER_STATUS(pCxt); SDatabaseOptions* pOptions = nodesMakeNode(QUERY_NODE_DATABASE_OPTIONS); CHECK_OUT_OF_MEM(pOptions); pOptions->buffer = -1; @@ -697,10 +730,12 @@ SNode* createAlterDatabaseOptions(SAstCreateContext* pCxt) { pOptions->walLevel = -1; pOptions->numOfVgroups = -1; pOptions->singleStable = -1; + pOptions->schemaless = -1; return (SNode*)pOptions; } SNode* setDatabaseOption(SAstCreateContext* pCxt, SNode* pOptions, EDatabaseOptionType type, void* pVal) { + CHECK_PARSER_STATUS(pCxt); switch (type) { case DB_OPTION_BUFFER: ((SDatabaseOptions*)pOptions)->buffer = taosStr2Int32(((SToken*)pVal)->z, NULL, 10); @@ -760,6 +795,9 @@ SNode* setDatabaseOption(SAstCreateContext* pCxt, SNode* pOptions, EDatabaseOpti case DB_OPTION_RETENTIONS: ((SDatabaseOptions*)pOptions)->pRetentions = pVal; break; + case DB_OPTION_SCHEMALESS: + 
((SDatabaseOptions*)pOptions)->schemaless = taosStr2Int8(((SToken*)pVal)->z, NULL, 10); + break; default: break; } @@ -767,6 +805,7 @@ SNode* setDatabaseOption(SAstCreateContext* pCxt, SNode* pOptions, EDatabaseOpti } SNode* setAlterDatabaseOption(SAstCreateContext* pCxt, SNode* pOptions, SAlterOption* pAlterOption) { + CHECK_PARSER_STATUS(pCxt); switch (pAlterOption->type) { case DB_OPTION_KEEP: case DB_OPTION_RETENTIONS: @@ -778,6 +817,7 @@ SNode* setAlterDatabaseOption(SAstCreateContext* pCxt, SNode* pOptions, SAlterOp } SNode* createCreateDatabaseStmt(SAstCreateContext* pCxt, bool ignoreExists, SToken* pDbName, SNode* pOptions) { + CHECK_PARSER_STATUS(pCxt); if (!checkDbName(pCxt, pDbName, false)) { return NULL; } @@ -790,6 +830,7 @@ SNode* createCreateDatabaseStmt(SAstCreateContext* pCxt, bool ignoreExists, STok } SNode* createDropDatabaseStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pDbName) { + CHECK_PARSER_STATUS(pCxt); if (!checkDbName(pCxt, pDbName, false)) { return NULL; } @@ -801,6 +842,7 @@ SNode* createDropDatabaseStmt(SAstCreateContext* pCxt, bool ignoreNotExists, STo } SNode* createAlterDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName, SNode* pOptions) { + CHECK_PARSER_STATUS(pCxt); if (!checkDbName(pCxt, pDbName, false)) { return NULL; } @@ -812,6 +854,7 @@ SNode* createAlterDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName, SNode* } SNode* createDefaultTableOptions(SAstCreateContext* pCxt) { + CHECK_PARSER_STATUS(pCxt); STableOptions* pOptions = nodesMakeNode(QUERY_NODE_TABLE_OPTIONS); CHECK_OUT_OF_MEM(pOptions); pOptions->delay = TSDB_DEFAULT_ROLLUP_DELAY; @@ -821,6 +864,7 @@ SNode* createDefaultTableOptions(SAstCreateContext* pCxt) { } SNode* createAlterTableOptions(SAstCreateContext* pCxt) { + CHECK_PARSER_STATUS(pCxt); STableOptions* pOptions = nodesMakeNode(QUERY_NODE_TABLE_OPTIONS); CHECK_OUT_OF_MEM(pOptions); pOptions->delay = -1; @@ -830,6 +874,7 @@ SNode* createAlterTableOptions(SAstCreateContext* pCxt) { } SNode* setTableOption(SAstCreateContext* pCxt, SNode* pOptions, ETableOptionType type, void* pVal) { + CHECK_PARSER_STATUS(pCxt); switch (type) { case TABLE_OPTION_COMMENT: if (checkComment(pCxt, (SToken*)pVal, true)) { @@ -859,6 +904,7 @@ SNode* setTableOption(SAstCreateContext* pCxt, SNode* pOptions, ETableOptionType } SNode* createColumnDefNode(SAstCreateContext* pCxt, SToken* pColName, SDataType dataType, const SToken* pComment) { + CHECK_PARSER_STATUS(pCxt); if (!checkColumnName(pCxt, pColName) || !checkComment(pCxt, pComment, false)) { return NULL; } @@ -885,9 +931,7 @@ SDataType createVarLenDataType(uint8_t type, const SToken* pLen) { SNode* createCreateTableStmt(SAstCreateContext* pCxt, bool ignoreExists, SNode* pRealTable, SNodeList* pCols, SNodeList* pTags, SNode* pOptions) { - if (NULL == pRealTable) { - return NULL; - } + CHECK_PARSER_STATUS(pCxt); SCreateTableStmt* pStmt = (SCreateTableStmt*)nodesMakeNode(QUERY_NODE_CREATE_TABLE_STMT); CHECK_OUT_OF_MEM(pStmt); strcpy(pStmt->dbName, ((SRealTableNode*)pRealTable)->table.dbName); @@ -902,9 +946,7 @@ SNode* createCreateTableStmt(SAstCreateContext* pCxt, bool ignoreExists, SNode* SNode* createCreateSubTableClause(SAstCreateContext* pCxt, bool ignoreExists, SNode* pRealTable, SNode* pUseRealTable, SNodeList* pSpecificTags, SNodeList* pValsOfTags, SNode* pOptions) { - if (NULL == pRealTable) { - return NULL; - } + CHECK_PARSER_STATUS(pCxt); SCreateSubTableClause* pStmt = nodesMakeNode(QUERY_NODE_CREATE_SUBTABLE_CLAUSE); CHECK_OUT_OF_MEM(pStmt); strcpy(pStmt->dbName, 
((SRealTableNode*)pRealTable)->table.dbName); @@ -920,6 +962,7 @@ SNode* createCreateSubTableClause(SAstCreateContext* pCxt, bool ignoreExists, SN } SNode* createCreateMultiTableStmt(SAstCreateContext* pCxt, SNodeList* pSubTables) { + CHECK_PARSER_STATUS(pCxt); SCreateMultiTableStmt* pStmt = nodesMakeNode(QUERY_NODE_CREATE_MULTI_TABLE_STMT); CHECK_OUT_OF_MEM(pStmt); pStmt->pSubTables = pSubTables; @@ -927,9 +970,7 @@ SNode* createCreateMultiTableStmt(SAstCreateContext* pCxt, SNodeList* pSubTables } SNode* createDropTableClause(SAstCreateContext* pCxt, bool ignoreNotExists, SNode* pRealTable) { - if (NULL == pRealTable) { - return NULL; - } + CHECK_PARSER_STATUS(pCxt); SDropTableClause* pStmt = nodesMakeNode(QUERY_NODE_DROP_TABLE_CLAUSE); CHECK_OUT_OF_MEM(pStmt); strcpy(pStmt->dbName, ((SRealTableNode*)pRealTable)->table.dbName); @@ -940,6 +981,7 @@ SNode* createDropTableClause(SAstCreateContext* pCxt, bool ignoreNotExists, SNod } SNode* createDropTableStmt(SAstCreateContext* pCxt, SNodeList* pTables) { + CHECK_PARSER_STATUS(pCxt); SDropTableStmt* pStmt = nodesMakeNode(QUERY_NODE_DROP_TABLE_STMT); CHECK_OUT_OF_MEM(pStmt); pStmt->pTables = pTables; @@ -947,6 +989,7 @@ SNode* createDropTableStmt(SAstCreateContext* pCxt, SNodeList* pTables) { } SNode* createDropSuperTableStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SNode* pRealTable) { + CHECK_PARSER_STATUS(pCxt); SDropSuperTableStmt* pStmt = nodesMakeNode(QUERY_NODE_DROP_SUPER_TABLE_STMT); CHECK_OUT_OF_MEM(pStmt); strcpy(pStmt->dbName, ((SRealTableNode*)pRealTable)->table.dbName); @@ -964,9 +1007,7 @@ static SNode* createAlterTableStmtFinalize(SNode* pRealTable, SAlterTableStmt* p } SNode* createAlterTableModifyOptions(SAstCreateContext* pCxt, SNode* pRealTable, SNode* pOptions) { - if (NULL == pRealTable) { - return NULL; - } + CHECK_PARSER_STATUS(pCxt); SAlterTableStmt* pStmt = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT); CHECK_OUT_OF_MEM(pStmt); pStmt->alterType = TSDB_ALTER_TABLE_UPDATE_OPTIONS; @@ -974,9 +1015,10 @@ SNode* createAlterTableModifyOptions(SAstCreateContext* pCxt, SNode* pRealTable, return createAlterTableStmtFinalize(pRealTable, pStmt); } -SNode* createAlterTableAddModifyCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, - const SToken* pColName, SDataType dataType) { - if (NULL == pRealTable) { +SNode* createAlterTableAddModifyCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, SToken* pColName, + SDataType dataType) { + CHECK_PARSER_STATUS(pCxt); + if (!checkColumnName(pCxt, pColName)) { return NULL; } SAlterTableStmt* pStmt = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT); @@ -987,8 +1029,9 @@ SNode* createAlterTableAddModifyCol(SAstCreateContext* pCxt, SNode* pRealTable, return createAlterTableStmtFinalize(pRealTable, pStmt); } -SNode* createAlterTableDropCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, const SToken* pColName) { - if (NULL == pRealTable) { +SNode* createAlterTableDropCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, SToken* pColName) { + CHECK_PARSER_STATUS(pCxt); + if (!checkColumnName(pCxt, pColName)) { return NULL; } SAlterTableStmt* pStmt = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT); @@ -998,9 +1041,10 @@ SNode* createAlterTableDropCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_ return createAlterTableStmtFinalize(pRealTable, pStmt); } -SNode* createAlterTableRenameCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, - const SToken* pOldColName, const SToken* pNewColName) { - if (NULL == pRealTable) { +SNode* 
createAlterTableRenameCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, SToken* pOldColName, + SToken* pNewColName) { + CHECK_PARSER_STATUS(pCxt); + if (!checkColumnName(pCxt, pOldColName) || !checkColumnName(pCxt, pNewColName)) { return NULL; } SAlterTableStmt* pStmt = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT); @@ -1011,8 +1055,9 @@ SNode* createAlterTableRenameCol(SAstCreateContext* pCxt, SNode* pRealTable, int return createAlterTableStmtFinalize(pRealTable, pStmt); } -SNode* createAlterTableSetTag(SAstCreateContext* pCxt, SNode* pRealTable, const SToken* pTagName, SNode* pVal) { - if (NULL == pRealTable) { +SNode* createAlterTableSetTag(SAstCreateContext* pCxt, SNode* pRealTable, SToken* pTagName, SNode* pVal) { + CHECK_PARSER_STATUS(pCxt); + if (!checkColumnName(pCxt, pTagName)) { return NULL; } SAlterTableStmt* pStmt = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT); @@ -1024,6 +1069,7 @@ SNode* createAlterTableSetTag(SAstCreateContext* pCxt, SNode* pRealTable, const } SNode* createUseDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName) { + CHECK_PARSER_STATUS(pCxt); if (!checkDbName(pCxt, pDbName, false)) { return NULL; } @@ -1039,13 +1085,13 @@ static bool needDbShowStmt(ENodeType type) { } SNode* createShowStmt(SAstCreateContext* pCxt, ENodeType type, SNode* pDbName, SNode* pTbNamePattern) { + CHECK_PARSER_STATUS(pCxt); if (needDbShowStmt(type) && NULL == pDbName && NULL == pCxt->pQueryCxt->db) { snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen, "db not specified"); pCxt->errCode = TSDB_CODE_PAR_SYNTAX_ERROR; return NULL; } SShowStmt* pStmt = nodesMakeNode(type); - ; CHECK_OUT_OF_MEM(pStmt); pStmt->pDbName = pDbName; pStmt->pTbNamePattern = pTbNamePattern; @@ -1053,18 +1099,21 @@ SNode* createShowStmt(SAstCreateContext* pCxt, ENodeType type, SNode* pDbName, S } SNode* createShowCreateDatabaseStmt(SAstCreateContext* pCxt, const SToken* pDbName) { + CHECK_PARSER_STATUS(pCxt); SNode* pStmt = nodesMakeNode(QUERY_NODE_SHOW_CREATE_DATABASE_STMT); CHECK_OUT_OF_MEM(pStmt); return pStmt; } SNode* createShowCreateTableStmt(SAstCreateContext* pCxt, ENodeType type, SNode* pRealTable) { + CHECK_PARSER_STATUS(pCxt); SNode* pStmt = nodesMakeNode(type); CHECK_OUT_OF_MEM(pStmt); return pStmt; } SNode* createCreateUserStmt(SAstCreateContext* pCxt, SToken* pUserName, const SToken* pPassword) { + CHECK_PARSER_STATUS(pCxt); char password[TSDB_USET_PASSWORD_LEN] = {0}; if (!checkUserName(pCxt, pUserName) || !checkPassword(pCxt, pPassword, password)) { return NULL; @@ -1077,6 +1126,7 @@ SNode* createCreateUserStmt(SAstCreateContext* pCxt, SToken* pUserName, const ST } SNode* createAlterUserStmt(SAstCreateContext* pCxt, SToken* pUserName, int8_t alterType, const SToken* pVal) { + CHECK_PARSER_STATUS(pCxt); if (!checkUserName(pCxt, pUserName)) { return NULL; } @@ -1096,6 +1146,7 @@ SNode* createAlterUserStmt(SAstCreateContext* pCxt, SToken* pUserName, int8_t al } SNode* createDropUserStmt(SAstCreateContext* pCxt, SToken* pUserName) { + CHECK_PARSER_STATUS(pCxt); if (!checkUserName(pCxt, pUserName)) { return NULL; } @@ -1106,6 +1157,7 @@ SNode* createDropUserStmt(SAstCreateContext* pCxt, SToken* pUserName) { } SNode* createCreateDnodeStmt(SAstCreateContext* pCxt, const SToken* pFqdn, const SToken* pPort) { + CHECK_PARSER_STATUS(pCxt); int32_t port = 0; char fqdn[TSDB_FQDN_LEN] = {0}; if (NULL == pPort) { @@ -1127,6 +1179,7 @@ SNode* createCreateDnodeStmt(SAstCreateContext* pCxt, const SToken* pFqdn, const } SNode* createDropDnodeStmt(SAstCreateContext* pCxt, const SToken* pDnode) { + 
CHECK_PARSER_STATUS(pCxt); SDropDnodeStmt* pStmt = (SDropDnodeStmt*)nodesMakeNode(QUERY_NODE_DROP_DNODE_STMT); CHECK_OUT_OF_MEM(pStmt); if (TK_NK_INTEGER == pDnode->type) { @@ -1142,6 +1195,7 @@ SNode* createDropDnodeStmt(SAstCreateContext* pCxt, const SToken* pDnode) { SNode* createAlterDnodeStmt(SAstCreateContext* pCxt, const SToken* pDnode, const SToken* pConfig, const SToken* pValue) { + CHECK_PARSER_STATUS(pCxt); SAlterDnodeStmt* pStmt = nodesMakeNode(QUERY_NODE_ALTER_DNODE_STMT); CHECK_OUT_OF_MEM(pStmt); pStmt->dnodeId = taosStr2Int32(pDnode->z, NULL, 10); @@ -1154,6 +1208,7 @@ SNode* createAlterDnodeStmt(SAstCreateContext* pCxt, const SToken* pDnode, const SNode* createCreateIndexStmt(SAstCreateContext* pCxt, EIndexType type, bool ignoreExists, SToken* pIndexName, SToken* pTableName, SNodeList* pCols, SNode* pOptions) { + CHECK_PARSER_STATUS(pCxt); if (!checkIndexName(pCxt, pIndexName) || !checkTableName(pCxt, pTableName) || !checkDbName(pCxt, NULL, true)) { return NULL; } @@ -1170,6 +1225,7 @@ SNode* createCreateIndexStmt(SAstCreateContext* pCxt, EIndexType type, bool igno SNode* createIndexOption(SAstCreateContext* pCxt, SNodeList* pFuncs, SNode* pInterval, SNode* pOffset, SNode* pSliding) { + CHECK_PARSER_STATUS(pCxt); SIndexOptions* pOptions = nodesMakeNode(QUERY_NODE_INDEX_OPTIONS); CHECK_OUT_OF_MEM(pOptions); pOptions->pFuncs = pFuncs; @@ -1180,6 +1236,7 @@ SNode* createIndexOption(SAstCreateContext* pCxt, SNodeList* pFuncs, SNode* pInt } SNode* createDropIndexStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pIndexName, SToken* pTableName) { + CHECK_PARSER_STATUS(pCxt); if (!checkIndexName(pCxt, pIndexName) || !checkTableName(pCxt, pTableName)) { return NULL; } @@ -1192,6 +1249,7 @@ SNode* createDropIndexStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken } SNode* createCreateComponentNodeStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pDnodeId) { + CHECK_PARSER_STATUS(pCxt); SCreateComponentNodeStmt* pStmt = nodesMakeNode(type); CHECK_OUT_OF_MEM(pStmt); pStmt->dnodeId = taosStr2Int32(pDnodeId->z, NULL, 10); @@ -1200,6 +1258,7 @@ SNode* createCreateComponentNodeStmt(SAstCreateContext* pCxt, ENodeType type, co } SNode* createDropComponentNodeStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pDnodeId) { + CHECK_PARSER_STATUS(pCxt); SDropComponentNodeStmt* pStmt = nodesMakeNode(type); CHECK_OUT_OF_MEM(pStmt); pStmt->dnodeId = taosStr2Int32(pDnodeId->z, NULL, 10); @@ -1208,6 +1267,7 @@ SNode* createDropComponentNodeStmt(SAstCreateContext* pCxt, ENodeType type, cons } SNode* createTopicOptions(SAstCreateContext* pCxt) { + CHECK_PARSER_STATUS(pCxt); STopicOptions* pOptions = nodesMakeNode(QUERY_NODE_TOPIC_OPTIONS); CHECK_OUT_OF_MEM(pOptions); pOptions->withTable = false; @@ -1218,6 +1278,7 @@ SNode* createTopicOptions(SAstCreateContext* pCxt) { SNode* createCreateTopicStmt(SAstCreateContext* pCxt, bool ignoreExists, const SToken* pTopicName, SNode* pQuery, const SToken* pSubscribeDbName, SNode* pOptions) { + CHECK_PARSER_STATUS(pCxt); SCreateTopicStmt* pStmt = nodesMakeNode(QUERY_NODE_CREATE_TOPIC_STMT); CHECK_OUT_OF_MEM(pStmt); strncpy(pStmt->topicName, pTopicName->z, pTopicName->n); @@ -1231,6 +1292,7 @@ SNode* createCreateTopicStmt(SAstCreateContext* pCxt, bool ignoreExists, const S } SNode* createDropTopicStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pTopicName) { + CHECK_PARSER_STATUS(pCxt); SDropTopicStmt* pStmt = nodesMakeNode(QUERY_NODE_DROP_TOPIC_STMT); CHECK_OUT_OF_MEM(pStmt); strncpy(pStmt->topicName, 
pTopicName->z, pTopicName->n); @@ -1238,7 +1300,19 @@ SNode* createDropTopicStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const return (SNode*)pStmt; } +SNode* createDropCGroupStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pCGroupId, + const SToken* pTopicName) { + CHECK_PARSER_STATUS(pCxt); + SDropCGroupStmt* pStmt = nodesMakeNode(QUERY_NODE_DROP_CGROUP_STMT); + CHECK_OUT_OF_MEM(pStmt); + pStmt->ignoreNotExists = ignoreNotExists; + strncpy(pStmt->topicName, pTopicName->z, pTopicName->n); + strncpy(pStmt->cgroup, pCGroupId->z, pCGroupId->n); + return (SNode*)pStmt; +} + SNode* createAlterLocalStmt(SAstCreateContext* pCxt, const SToken* pConfig, const SToken* pValue) { + CHECK_PARSER_STATUS(pCxt); SAlterLocalStmt* pStmt = nodesMakeNode(QUERY_NODE_ALTER_LOCAL_STMT); CHECK_OUT_OF_MEM(pStmt); trimString(pConfig->z, pConfig->n, pStmt->config, sizeof(pStmt->config)); @@ -1249,6 +1323,7 @@ SNode* createAlterLocalStmt(SAstCreateContext* pCxt, const SToken* pConfig, cons } SNode* createDefaultExplainOptions(SAstCreateContext* pCxt) { + CHECK_PARSER_STATUS(pCxt); SExplainOptions* pOptions = nodesMakeNode(QUERY_NODE_EXPLAIN_OPTIONS); CHECK_OUT_OF_MEM(pOptions); pOptions->verbose = TSDB_DEFAULT_EXPLAIN_VERBOSE; @@ -1257,16 +1332,19 @@ SNode* createDefaultExplainOptions(SAstCreateContext* pCxt) { } SNode* setExplainVerbose(SAstCreateContext* pCxt, SNode* pOptions, const SToken* pVal) { + CHECK_PARSER_STATUS(pCxt); ((SExplainOptions*)pOptions)->verbose = (0 == strncasecmp(pVal->z, "true", pVal->n)); return pOptions; } SNode* setExplainRatio(SAstCreateContext* pCxt, SNode* pOptions, const SToken* pVal) { + CHECK_PARSER_STATUS(pCxt); ((SExplainOptions*)pOptions)->ratio = taosStr2Double(pVal->z, NULL); return pOptions; } SNode* createExplainStmt(SAstCreateContext* pCxt, bool analyze, SNode* pOptions, SNode* pQuery) { + CHECK_PARSER_STATUS(pCxt); SExplainStmt* pStmt = nodesMakeNode(QUERY_NODE_EXPLAIN_STMT); CHECK_OUT_OF_MEM(pStmt); pStmt->analyze = analyze; @@ -1276,9 +1354,7 @@ SNode* createExplainStmt(SAstCreateContext* pCxt, bool analyze, SNode* pOptions, } SNode* createDescribeStmt(SAstCreateContext* pCxt, SNode* pRealTable) { - if (NULL == pRealTable) { - return NULL; - } + CHECK_PARSER_STATUS(pCxt); SDescribeStmt* pStmt = nodesMakeNode(QUERY_NODE_DESCRIBE_STMT); CHECK_OUT_OF_MEM(pStmt); strcpy(pStmt->dbName, ((SRealTableNode*)pRealTable)->table.dbName); @@ -1288,12 +1364,14 @@ SNode* createDescribeStmt(SAstCreateContext* pCxt, SNode* pRealTable) { } SNode* createResetQueryCacheStmt(SAstCreateContext* pCxt) { + CHECK_PARSER_STATUS(pCxt); SNode* pStmt = nodesMakeNode(QUERY_NODE_RESET_QUERY_CACHE_STMT); CHECK_OUT_OF_MEM(pStmt); return pStmt; } SNode* createCompactStmt(SAstCreateContext* pCxt, SNodeList* pVgroups) { + CHECK_PARSER_STATUS(pCxt); SNode* pStmt = nodesMakeNode(QUERY_NODE_COMPACT_STMT); CHECK_OUT_OF_MEM(pStmt); return pStmt; @@ -1301,6 +1379,7 @@ SNode* createCompactStmt(SAstCreateContext* pCxt, SNodeList* pVgroups) { SNode* createCreateFunctionStmt(SAstCreateContext* pCxt, bool ignoreExists, bool aggFunc, const SToken* pFuncName, const SToken* pLibPath, SDataType dataType, int32_t bufSize) { + CHECK_PARSER_STATUS(pCxt); if (pLibPath->n <= 2) { pCxt->errCode = TSDB_CODE_PAR_SYNTAX_ERROR; return NULL; @@ -1317,6 +1396,7 @@ SNode* createCreateFunctionStmt(SAstCreateContext* pCxt, bool ignoreExists, bool } SNode* createDropFunctionStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pFuncName) { + CHECK_PARSER_STATUS(pCxt); SDropFunctionStmt* pStmt = 
nodesMakeNode(QUERY_NODE_DROP_FUNCTION_STMT); CHECK_OUT_OF_MEM(pStmt); pStmt->ignoreNotExists = ignoreNotExists; @@ -1325,6 +1405,7 @@ SNode* createDropFunctionStmt(SAstCreateContext* pCxt, bool ignoreNotExists, con } SNode* createStreamOptions(SAstCreateContext* pCxt) { + CHECK_PARSER_STATUS(pCxt); SStreamOptions* pOptions = nodesMakeNode(QUERY_NODE_STREAM_OPTIONS); CHECK_OUT_OF_MEM(pOptions); pOptions->triggerType = STREAM_TRIGGER_AT_ONCE; @@ -1333,6 +1414,7 @@ SNode* createStreamOptions(SAstCreateContext* pCxt) { SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, const SToken* pStreamName, SNode* pRealTable, SNode* pOptions, SNode* pQuery) { + CHECK_PARSER_STATUS(pCxt); SCreateStreamStmt* pStmt = nodesMakeNode(QUERY_NODE_CREATE_STREAM_STMT); CHECK_OUT_OF_MEM(pStmt); strncpy(pStmt->streamName, pStreamName->z, pStreamName->n); @@ -1348,6 +1430,7 @@ SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, const } SNode* createDropStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pStreamName) { + CHECK_PARSER_STATUS(pCxt); SDropStreamStmt* pStmt = nodesMakeNode(QUERY_NODE_DROP_STREAM_STMT); CHECK_OUT_OF_MEM(pStmt); strncpy(pStmt->streamName, pStreamName->z, pStreamName->n); @@ -1356,6 +1439,7 @@ SNode* createDropStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const } SNode* createKillStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pId) { + CHECK_PARSER_STATUS(pCxt); SKillStmt* pStmt = nodesMakeNode(type); CHECK_OUT_OF_MEM(pStmt); pStmt->targetId = taosStr2Int32(pId->z, NULL, 10); @@ -1363,30 +1447,35 @@ SNode* createKillStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pId } SNode* createMergeVgroupStmt(SAstCreateContext* pCxt, const SToken* pVgId1, const SToken* pVgId2) { + CHECK_PARSER_STATUS(pCxt); SNode* pStmt = nodesMakeNode(QUERY_NODE_MERGE_VGROUP_STMT); CHECK_OUT_OF_MEM(pStmt); return pStmt; } SNode* createRedistributeVgroupStmt(SAstCreateContext* pCxt, const SToken* pVgId, SNodeList* pDnodes) { + CHECK_PARSER_STATUS(pCxt); SNode* pStmt = nodesMakeNode(QUERY_NODE_REDISTRIBUTE_VGROUP_STMT); CHECK_OUT_OF_MEM(pStmt); return pStmt; } SNode* createSplitVgroupStmt(SAstCreateContext* pCxt, const SToken* pVgId) { + CHECK_PARSER_STATUS(pCxt); SNode* pStmt = nodesMakeNode(QUERY_NODE_SPLIT_VGROUP_STMT); CHECK_OUT_OF_MEM(pStmt); return pStmt; } SNode* createSyncdbStmt(SAstCreateContext* pCxt, const SToken* pDbName) { + CHECK_PARSER_STATUS(pCxt); SNode* pStmt = nodesMakeNode(QUERY_NODE_SYNCDB_STMT); CHECK_OUT_OF_MEM(pStmt); return pStmt; } SNode* createGrantStmt(SAstCreateContext* pCxt, int64_t privileges, SToken* pDbName, SToken* pUserName) { + CHECK_PARSER_STATUS(pCxt); if (!checkDbName(pCxt, pDbName, false) || !checkUserName(pCxt, pUserName)) { return NULL; } @@ -1399,6 +1488,7 @@ SNode* createGrantStmt(SAstCreateContext* pCxt, int64_t privileges, SToken* pDbN } SNode* createRevokeStmt(SAstCreateContext* pCxt, int64_t privileges, SToken* pDbName, SToken* pUserName) { + CHECK_PARSER_STATUS(pCxt); if (!checkDbName(pCxt, pDbName, false) || !checkUserName(pCxt, pUserName)) { return NULL; } diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index 5b59d1c080978217577581184834595432d6edc7..68c9684c97ac8eba986a339b7618e51bc02d7d79 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -13,11 +13,12 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ +#include "functionMgt.h" #include "os.h" -#include "parInt.h" - #include "parAst.h" +#include "parInt.h" #include "parToken.h" +#include "systable.h" typedef void* (*FMalloc)(size_t); typedef void (*FFree)(void*); @@ -86,3 +87,317 @@ abort_parse: taosArrayDestroy(cxt.pPlaceholderValues); return cxt.errCode; } + +typedef struct SCollectMetaKeyCxt { + SParseContext* pParseCxt; + SParseMetaCache* pMetaCache; +} SCollectMetaKeyCxt; + +static void destroyCollectMetaKeyCxt(SCollectMetaKeyCxt* pCxt) { + if (NULL != pCxt->pMetaCache) { + // TODO + } +} + +typedef struct SCollectMetaKeyFromExprCxt { + SCollectMetaKeyCxt* pComCxt; + int32_t errCode; +} SCollectMetaKeyFromExprCxt; + +static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt); + +static EDealRes collectMetaKeyFromFunction(SCollectMetaKeyFromExprCxt* pCxt, SFunctionNode* pFunc) { + if (fmIsBuiltinFunc(pFunc->functionName)) { + return DEAL_RES_CONTINUE; + } + pCxt->errCode = reserveUdfInCache(pFunc->functionName, pCxt->pComCxt->pMetaCache); + return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR; +} + +static EDealRes collectMetaKeyFromRealTable(SCollectMetaKeyFromExprCxt* pCxt, SRealTableNode* pRealTable) { + pCxt->errCode = reserveTableMetaInCache(pCxt->pComCxt->pParseCxt->acctId, pRealTable->table.dbName, + pRealTable->table.tableName, pCxt->pComCxt->pMetaCache); + if (TSDB_CODE_SUCCESS == pCxt->errCode) { + pCxt->errCode = reserveTableVgroupInCache(pCxt->pComCxt->pParseCxt->acctId, pRealTable->table.dbName, + pRealTable->table.tableName, pCxt->pComCxt->pMetaCache); + } + if (TSDB_CODE_SUCCESS == pCxt->errCode) { + pCxt->errCode = reserveUserAuthInCache(pCxt->pComCxt->pParseCxt->acctId, pCxt->pComCxt->pParseCxt->pUser, + pRealTable->table.dbName, AUTH_TYPE_READ, pCxt->pComCxt->pMetaCache); + } + return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR; +} + +static EDealRes collectMetaKeyFromTempTable(SCollectMetaKeyFromExprCxt* pCxt, STempTableNode* pTempTable) { + pCxt->errCode = collectMetaKeyFromQuery(pCxt->pComCxt, pTempTable->pSubquery); + return TSDB_CODE_SUCCESS == pCxt->errCode ?
DEAL_RES_CONTINUE : DEAL_RES_ERROR; +} + +static EDealRes collectMetaKeyFromExprImpl(SNode* pNode, void* pContext) { + SCollectMetaKeyFromExprCxt* pCxt = pContext; + switch (nodeType(pNode)) { + case QUERY_NODE_FUNCTION: + return collectMetaKeyFromFunction(pCxt, (SFunctionNode*)pNode); + case QUERY_NODE_REAL_TABLE: + return collectMetaKeyFromRealTable(pCxt, (SRealTableNode*)pNode); + case QUERY_NODE_TEMP_TABLE: + return collectMetaKeyFromTempTable(pCxt, (STempTableNode*)pNode); + default: + break; + } + return DEAL_RES_CONTINUE; +} + +static int32_t collectMetaKeyFromExprs(SCollectMetaKeyCxt* pCxt, SNodeList* pList) { + SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .errCode = TSDB_CODE_SUCCESS}; + nodesWalkExprs(pList, collectMetaKeyFromExprImpl, &cxt); + return cxt.errCode; +} + +static int32_t collectMetaKeyFromSetOperator(SCollectMetaKeyCxt* pCxt, SSetOperator* pStmt) { + int32_t code = collectMetaKeyFromQuery(pCxt, pStmt->pLeft); + if (TSDB_CODE_SUCCESS == code) { + code = collectMetaKeyFromQuery(pCxt, pStmt->pRight); + } + if (TSDB_CODE_SUCCESS == code) { + code = collectMetaKeyFromExprs(pCxt, pStmt->pOrderByList); + } + return code; +} + +static int32_t collectMetaKeyFromSelect(SCollectMetaKeyCxt* pCxt, SSelectStmt* pStmt) { + SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .errCode = TSDB_CODE_SUCCESS}; + nodesWalkSelectStmt(pStmt, SQL_CLAUSE_FROM, collectMetaKeyFromExprImpl, &cxt); + return cxt.errCode; +} + +static int32_t collectMetaKeyFromCreateTable(SCollectMetaKeyCxt* pCxt, SCreateTableStmt* pStmt) { + if (NULL == pStmt->pTags) { + return reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache); + } else { + return reserveDbCfgInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache); + } +} + +static int32_t collectMetaKeyFromCreateMultiTable(SCollectMetaKeyCxt* pCxt, SCreateMultiTableStmt* pStmt) { + int32_t code = TSDB_CODE_SUCCESS; + SNode* pNode = NULL; + FOREACH(pNode, pStmt->pSubTables) { + SCreateSubTableClause* pClause = (SCreateSubTableClause*)pNode; + code = + reserveTableMetaInCache(pCxt->pParseCxt->acctId, pClause->useDbName, pClause->useTableName, pCxt->pMetaCache); + if (TSDB_CODE_SUCCESS == code) { + code = reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pClause->dbName, pClause->tableName, pCxt->pMetaCache); + } + if (TSDB_CODE_SUCCESS != code) { + break; + } + } + return code; +} + +static int32_t collectMetaKeyFromAlterTable(SCollectMetaKeyCxt* pCxt, SAlterTableStmt* pStmt) { + int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache); + if (TSDB_CODE_SUCCESS == code) { + code = reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache); + } + return code; +} + +static int32_t collectMetaKeyFromUseDatabase(SCollectMetaKeyCxt* pCxt, SUseDatabaseStmt* pStmt) { + return reserveDbVgVersionInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromCreateIndex(SCollectMetaKeyCxt* pCxt, SCreateIndexStmt* pStmt) { + int32_t code = TSDB_CODE_SUCCESS; + if (INDEX_TYPE_SMA == pStmt->indexType) { + code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->db, pStmt->tableName, pCxt->pMetaCache); + if (TSDB_CODE_SUCCESS == code) { + code = + reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->db, pStmt->tableName, pCxt->pMetaCache); + } + } + return code; +} + +static int32_t collectMetaKeyFromCreateTopic(SCollectMetaKeyCxt* pCxt, 
SCreateTopicStmt* pStmt) { + if (NULL != pStmt->pQuery) { + return collectMetaKeyFromQuery(pCxt, pStmt->pQuery); + } + return TSDB_CODE_SUCCESS; +} + +static int32_t collectMetaKeyFromExplain(SCollectMetaKeyCxt* pCxt, SExplainStmt* pStmt) { + return collectMetaKeyFromQuery(pCxt, pStmt->pQuery); +} + +static int32_t collectMetaKeyFromCreateStream(SCollectMetaKeyCxt* pCxt, SCreateStreamStmt* pStmt) { + return collectMetaKeyFromQuery(pCxt, pStmt->pQuery); +} + +static int32_t collectMetaKeyFromShowDnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DNODES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowMnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MNODES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowModules(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MODULES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowQnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_QNODES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowSnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_SNODES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowBnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_BNODES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowDatabases(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_DATABASES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowFunctions(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_FUNCTIONS, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowIndexes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_INDEXES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowStables(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_STABLES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowStreams(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_STREAMS, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowTables(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, + TSDB_INS_TABLE_USER_TABLES, pCxt->pMetaCache); + if (TSDB_CODE_SUCCESS == code) { + if (NULL != pStmt->pDbName) { + code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal, pCxt->pMetaCache); + } else { + code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, pCxt->pMetaCache); + } + } + return code; +} + 
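/*
 * Editorial aside: the collectMetaKeyFromShow*() helpers above and below
 * differ only in which well-known system table they reserve, so a single
 * parameterized helper could fold them into one. A sketch under that
 * observation (collectMetaKeyFromShowSysTable is hypothetical, not part
 * of this patch; the reserve call and both schema-DB constants are):
 */
static int32_t collectMetaKeyFromShowSysTable(SCollectMetaKeyCxt* pCxt, const char* pDb, const char* pTable) {
  /* identical body to collectMetaKeyFromShowDnodes() and friends */
  return reserveTableMetaInCache(pCxt->pParseCxt->acctId, pDb, pTable, pCxt->pMetaCache);
}
/* e.g. collectMetaKeyFromShowSysTable(pCxt, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TOPICS); */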
+static int32_t collectMetaKeyFromShowUsers(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_USERS, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowLicence(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_LICENCES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowVgroups(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_VGROUPS, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowTopics(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TOPICS, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowTransactions(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TRANS, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) { + switch (nodeType(pStmt)) { + case QUERY_NODE_SET_OPERATOR: + return collectMetaKeyFromSetOperator(pCxt, (SSetOperator*)pStmt); + case QUERY_NODE_SELECT_STMT: + return collectMetaKeyFromSelect(pCxt, (SSelectStmt*)pStmt); + case QUERY_NODE_CREATE_TABLE_STMT: + return collectMetaKeyFromCreateTable(pCxt, (SCreateTableStmt*)pStmt); + case QUERY_NODE_CREATE_MULTI_TABLE_STMT: + return collectMetaKeyFromCreateMultiTable(pCxt, (SCreateMultiTableStmt*)pStmt); + case QUERY_NODE_ALTER_TABLE_STMT: + return collectMetaKeyFromAlterTable(pCxt, (SAlterTableStmt*)pStmt); + case QUERY_NODE_USE_DATABASE_STMT: + return collectMetaKeyFromUseDatabase(pCxt, (SUseDatabaseStmt*)pStmt); + case QUERY_NODE_CREATE_INDEX_STMT: + return collectMetaKeyFromCreateIndex(pCxt, (SCreateIndexStmt*)pStmt); + case QUERY_NODE_CREATE_TOPIC_STMT: + return collectMetaKeyFromCreateTopic(pCxt, (SCreateTopicStmt*)pStmt); + case QUERY_NODE_EXPLAIN_STMT: + return collectMetaKeyFromExplain(pCxt, (SExplainStmt*)pStmt); + case QUERY_NODE_CREATE_STREAM_STMT: + return collectMetaKeyFromCreateStream(pCxt, (SCreateStreamStmt*)pStmt); + case QUERY_NODE_SHOW_DNODES_STMT: + return collectMetaKeyFromShowDnodes(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_MNODES_STMT: + return collectMetaKeyFromShowMnodes(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_MODULES_STMT: + return collectMetaKeyFromShowModules(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_QNODES_STMT: + return collectMetaKeyFromShowQnodes(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_SNODES_STMT: + return collectMetaKeyFromShowSnodes(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_BNODES_STMT: + return collectMetaKeyFromShowBnodes(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_DATABASES_STMT: + return collectMetaKeyFromShowDatabases(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_FUNCTIONS_STMT: + return collectMetaKeyFromShowFunctions(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_INDEXES_STMT: + return collectMetaKeyFromShowIndexes(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_STABLES_STMT: + return collectMetaKeyFromShowStables(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_STREAMS_STMT: + return collectMetaKeyFromShowStreams(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_TABLES_STMT: + return collectMetaKeyFromShowTables(pCxt, (SShowStmt*)pStmt); + case 
QUERY_NODE_SHOW_USERS_STMT: + return collectMetaKeyFromShowUsers(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_LICENCE_STMT: + return collectMetaKeyFromShowLicence(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_VGROUPS_STMT: + return collectMetaKeyFromShowVgroups(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_TOPICS_STMT: + return collectMetaKeyFromShowTopics(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_TRANSACTIONS_STMT: + return collectMetaKeyFromShowTransactions(pCxt, (SShowStmt*)pStmt); + default: + break; + } + return TSDB_CODE_SUCCESS; +} + +int32_t collectMetaKey(SParseContext* pParseCxt, SQuery* pQuery) { + SCollectMetaKeyCxt cxt = {.pParseCxt = pParseCxt, .pMetaCache = taosMemoryCalloc(1, sizeof(SParseMetaCache))}; + if (NULL == cxt.pMetaCache) { + return TSDB_CODE_OUT_OF_MEMORY; + } + int32_t code = collectMetaKeyFromQuery(&cxt, pQuery->pRoot); + if (TSDB_CODE_SUCCESS == code) { + TSWAP(pQuery->pMetaCache, cxt.pMetaCache); + } + destroyCollectMetaKeyCxt(&cxt); + return code; +} diff --git a/source/libs/parser/src/parAuthenticator.c b/source/libs/parser/src/parAuthenticator.c index 250e7910d69847a130fa4f0b2132b3dcb99da8e7..2670e5710b9f5418c401e9799678c68d82c8f29d 100644 --- a/source/libs/parser/src/parAuthenticator.c +++ b/source/libs/parser/src/parAuthenticator.c @@ -18,23 +18,30 @@ #include "parInt.h" typedef struct SAuthCxt { - SParseContext* pParseCxt; - int32_t errCode; + SParseContext* pParseCxt; + SParseMetaCache* pMetaCache; + int32_t errCode; } SAuthCxt; static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt); -static int32_t checkAuth(SParseContext* pCxt, const char* pDbName, AUTH_TYPE type) { - if (pCxt->isSuperUser) { +static int32_t checkAuth(SAuthCxt* pCxt, const char* pDbName, AUTH_TYPE type) { + SParseContext* pParseCxt = pCxt->pParseCxt; + if (pParseCxt->isSuperUser) { return TSDB_CODE_SUCCESS; } SName name; - tNameSetDbName(&name, pCxt->acctId, pDbName, strlen(pDbName)); + tNameSetDbName(&name, pParseCxt->acctId, pDbName, strlen(pDbName)); char dbFname[TSDB_DB_FNAME_LEN] = {0}; tNameGetFullDbName(&name, dbFname); + int32_t code = TSDB_CODE_SUCCESS; bool pass = false; - int32_t code = - catalogChkAuth(pCxt->pCatalog, pCxt->pTransporter, &pCxt->mgmtEpSet, pCxt->pUser, dbFname, type, &pass); + if (NULL != pCxt->pMetaCache) { + code = getUserAuthFromCache(pCxt->pMetaCache, pParseCxt->pUser, dbFname, type, &pass); + } else { + code = catalogChkAuth(pParseCxt->pCatalog, pParseCxt->pTransporter, &pParseCxt->mgmtEpSet, pParseCxt->pUser, + dbFname, type, &pass); + } return TSDB_CODE_SUCCESS == code ? (pass ? TSDB_CODE_SUCCESS : TSDB_CODE_PAR_PERMISSION_DENIED) : code; } @@ -45,7 +52,7 @@ static EDealRes authSubquery(SAuthCxt* pCxt, SNode* pStmt) { static EDealRes authSelectImpl(SNode* pNode, void* pContext) { SAuthCxt* pCxt = pContext; if (QUERY_NODE_REAL_TABLE == nodeType(pNode)) { - pCxt->errCode = checkAuth(pCxt->pParseCxt, ((SRealTableNode*)pNode)->table.dbName, AUTH_TYPE_READ); + pCxt->errCode = checkAuth(pCxt, ((SRealTableNode*)pNode)->table.dbName, AUTH_TYPE_READ); return TSDB_CODE_SUCCESS == pCxt->errCode ? 
DEAL_RES_CONTINUE : DEAL_RES_ERROR; } else if (QUERY_NODE_TEMP_TABLE == nodeType(pNode)) { return authSubquery(pCxt, ((STempTableNode*)pNode)->pSubquery); @@ -79,87 +86,8 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) { return authSetOperator(pCxt, (SSetOperator*)pStmt); case QUERY_NODE_SELECT_STMT: return authSelect(pCxt, (SSelectStmt*)pStmt); - case QUERY_NODE_CREATE_DATABASE_STMT: - case QUERY_NODE_DROP_DATABASE_STMT: - case QUERY_NODE_ALTER_DATABASE_STMT: - case QUERY_NODE_CREATE_TABLE_STMT: - case QUERY_NODE_CREATE_SUBTABLE_CLAUSE: - case QUERY_NODE_CREATE_MULTI_TABLE_STMT: - case QUERY_NODE_DROP_TABLE_CLAUSE: - case QUERY_NODE_DROP_TABLE_STMT: - case QUERY_NODE_DROP_SUPER_TABLE_STMT: - case QUERY_NODE_ALTER_TABLE_STMT: - case QUERY_NODE_CREATE_USER_STMT: - case QUERY_NODE_ALTER_USER_STMT: - break; - case QUERY_NODE_DROP_USER_STMT: { + case QUERY_NODE_DROP_USER_STMT: return authDropUser(pCxt, (SDropUserStmt*)pStmt); - } - case QUERY_NODE_USE_DATABASE_STMT: - case QUERY_NODE_CREATE_DNODE_STMT: - case QUERY_NODE_DROP_DNODE_STMT: - case QUERY_NODE_ALTER_DNODE_STMT: - case QUERY_NODE_CREATE_INDEX_STMT: - case QUERY_NODE_DROP_INDEX_STMT: - case QUERY_NODE_CREATE_QNODE_STMT: - case QUERY_NODE_DROP_QNODE_STMT: - case QUERY_NODE_CREATE_BNODE_STMT: - case QUERY_NODE_DROP_BNODE_STMT: - case QUERY_NODE_CREATE_SNODE_STMT: - case QUERY_NODE_DROP_SNODE_STMT: - case QUERY_NODE_CREATE_MNODE_STMT: - case QUERY_NODE_DROP_MNODE_STMT: - case QUERY_NODE_CREATE_TOPIC_STMT: - case QUERY_NODE_DROP_TOPIC_STMT: - case QUERY_NODE_ALTER_LOCAL_STMT: - case QUERY_NODE_EXPLAIN_STMT: - case QUERY_NODE_DESCRIBE_STMT: - case QUERY_NODE_RESET_QUERY_CACHE_STMT: - case QUERY_NODE_COMPACT_STMT: - case QUERY_NODE_CREATE_FUNCTION_STMT: - case QUERY_NODE_DROP_FUNCTION_STMT: - case QUERY_NODE_CREATE_STREAM_STMT: - case QUERY_NODE_DROP_STREAM_STMT: - case QUERY_NODE_MERGE_VGROUP_STMT: - case QUERY_NODE_REDISTRIBUTE_VGROUP_STMT: - case QUERY_NODE_SPLIT_VGROUP_STMT: - case QUERY_NODE_SYNCDB_STMT: - case QUERY_NODE_GRANT_STMT: - case QUERY_NODE_REVOKE_STMT: - case QUERY_NODE_SHOW_DNODES_STMT: - case QUERY_NODE_SHOW_MNODES_STMT: - case QUERY_NODE_SHOW_MODULES_STMT: - case QUERY_NODE_SHOW_QNODES_STMT: - case QUERY_NODE_SHOW_SNODES_STMT: - case QUERY_NODE_SHOW_BNODES_STMT: - case QUERY_NODE_SHOW_CLUSTER_STMT: - case QUERY_NODE_SHOW_DATABASES_STMT: - case QUERY_NODE_SHOW_FUNCTIONS_STMT: - case QUERY_NODE_SHOW_INDEXES_STMT: - case QUERY_NODE_SHOW_STABLES_STMT: - case QUERY_NODE_SHOW_STREAMS_STMT: - case QUERY_NODE_SHOW_TABLES_STMT: - case QUERY_NODE_SHOW_USERS_STMT: - case QUERY_NODE_SHOW_LICENCE_STMT: - case QUERY_NODE_SHOW_VGROUPS_STMT: - case QUERY_NODE_SHOW_TOPICS_STMT: - case QUERY_NODE_SHOW_CONSUMERS_STMT: - case QUERY_NODE_SHOW_SUBSCRIBES_STMT: - case QUERY_NODE_SHOW_SMAS_STMT: - case QUERY_NODE_SHOW_CONFIGS_STMT: - case QUERY_NODE_SHOW_CONNECTIONS_STMT: - case QUERY_NODE_SHOW_QUERIES_STMT: - case QUERY_NODE_SHOW_VNODES_STMT: - case QUERY_NODE_SHOW_APPS_STMT: - case QUERY_NODE_SHOW_SCORES_STMT: - case QUERY_NODE_SHOW_VARIABLE_STMT: - case QUERY_NODE_SHOW_CREATE_DATABASE_STMT: - case QUERY_NODE_SHOW_CREATE_TABLE_STMT: - case QUERY_NODE_SHOW_CREATE_STABLE_STMT: - case QUERY_NODE_SHOW_TRANSACTIONS_STMT: - case QUERY_NODE_KILL_CONNECTION_STMT: - case QUERY_NODE_KILL_QUERY_STMT: - case QUERY_NODE_KILL_TRANSACTION_STMT: default: break; } @@ -168,6 +96,6 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) { } int32_t authenticate(SParseContext* pParseCxt, SQuery* pQuery) { - SAuthCxt cxt = {.pParseCxt = pParseCxt, 
.errCode = TSDB_CODE_SUCCESS}; + SAuthCxt cxt = {.pParseCxt = pParseCxt, .pMetaCache = pQuery->pMetaCache, .errCode = TSDB_CODE_SUCCESS}; return authQuery(&cxt, pQuery->pRoot); } diff --git a/source/libs/parser/src/parCalcConst.c b/source/libs/parser/src/parCalcConst.c index 646ef4cf6293eb754eb04427954104d1c2de651a..42b001c1318058be96871918bea5aee0f084c82a 100644 --- a/source/libs/parser/src/parCalcConst.c +++ b/source/libs/parser/src/parCalcConst.c @@ -176,11 +176,11 @@ static int32_t calcConstProject(SNode* pProject, SNode** pNew) { } int32_t code = scalarCalculateConstants(pProject, pNew); - if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE == nodeType(pNew) && NULL != pAssociation) { + if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE == nodeType(*pNew) && NULL != pAssociation) { int32_t size = taosArrayGetSize(pAssociation); for (int32_t i = 0; i < size; ++i) { - SNode** pCol = taosArrayGet(pAssociation, i); - *pCol = nodesCloneNode(pNew); + SNode** pCol = taosArrayGetP(pAssociation, i); + *pCol = nodesCloneNode(*pNew); if (NULL == *pCol) { return TSDB_CODE_OUT_OF_MEMORY; } @@ -189,11 +189,18 @@ static int32_t calcConstProject(SNode* pProject, SNode** pNew) { return code; } -static int32_t calcConstProjections(SCalcConstContext* pCxt, SNodeList* pProjections, bool subquery) { +static bool isUselessCol(bool hasSelectValFunc, SExprNode* pProj) { + if (hasSelectValFunc && QUERY_NODE_FUNCTION == nodeType(pProj) && fmIsSelectFunc(((SFunctionNode*)pProj)->funcId)) { + return false; + } + return NULL == ((SExprNode*)pProj)->pAssociation; +} + +static int32_t calcConstProjections(SCalcConstContext* pCxt, SSelectStmt* pSelect, bool subquery) { SNode* pProj = NULL; - WHERE_EACH(pProj, pProjections) { - if (subquery && NULL == ((SExprNode*)pProj)->pAssociation) { - ERASE_NODE(pProjections); + WHERE_EACH(pProj, pSelect->pProjectionList) { + if (subquery && isUselessCol(pSelect->hasSelectValFunc, (SExprNode*)pProj)) { + ERASE_NODE(pSelect->pProjectionList); continue; } SNode* pNew = NULL; @@ -226,9 +233,9 @@ static int32_t calcConstGroupBy(SCalcConstContext* pCxt, SSelectStmt* pSelect) { } static int32_t calcConstSelect(SCalcConstContext* pCxt, SSelectStmt* pSelect, bool subquery) { - int32_t code = calcConstProjections(pCxt, pSelect->pProjectionList, subquery); + int32_t code = calcConstFromTable(pCxt, pSelect); if (TSDB_CODE_SUCCESS == code) { - code = calcConstFromTable(pCxt, pSelect); + code = calcConstProjections(pCxt, pSelect, subquery); } if (TSDB_CODE_SUCCESS == code) { code = calcConstSelectCondition(pCxt, pSelect, &pSelect->pWhere); diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c index 11324e3f49af1027fb6244bf49d78a14b427e782..047c2d15045f667d41319b6d7c14c475cd6273a1 100644 --- a/source/libs/parser/src/parInsert.c +++ b/source/libs/parser/src/parInsert.c @@ -64,6 +64,7 @@ typedef struct SInsertParseContext { int32_t totalNum; SVnodeModifOpStmt* pOutput; SStmtCallback* pStmtCb; + SParseMetaCache* pMetaCache; } SInsertParseContext; typedef int32_t (*_row_append_fn_t)(SMsgBuf* pMsgBuf, const void* value, int32_t len, void* param); @@ -92,15 +93,15 @@ typedef struct SMemParam { } \ } while (0) -static int32_t skipInsertInto(SInsertParseContext* pCxt) { +static int32_t skipInsertInto(char** pSql, SMsgBuf* pMsg) { SToken sToken; - NEXT_TOKEN(pCxt->pSql, sToken); + NEXT_TOKEN(*pSql, sToken); if (TK_INSERT != sToken.type) { - return buildSyntaxErrMsg(&pCxt->msg, "keyword INSERT is expected", sToken.z); + return buildSyntaxErrMsg(pMsg, "keyword INSERT is 
expected", sToken.z); } - NEXT_TOKEN(pCxt->pSql, sToken); + NEXT_TOKEN(*pSql, sToken); if (TK_INTO != sToken.type) { - return buildSyntaxErrMsg(&pCxt->msg, "keyword INTO is expected", sToken.z); + return buildSyntaxErrMsg(pMsg, "keyword INTO is expected", sToken.z); } return TSDB_CODE_SUCCESS; } @@ -189,6 +190,7 @@ static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, con const char* msg1 = "name too long"; const char* msg2 = "invalid database name"; const char* msg3 = "db is not specified"; + const char* msg4 = "invalid table name"; int32_t code = TSDB_CODE_SUCCESS; char* p = strnchr(pTableName->z, TS_PATH_DELIMITER[0], pTableName->n, true); @@ -207,7 +209,11 @@ static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, con } int32_t tbLen = pTableName->n - dbLen - 1; - char tbname[TSDB_TABLE_FNAME_LEN] = {0}; + if (tbLen <= 0) { + return buildInvalidOperationMsg(pMsgBuf, msg4); + } + + char tbname[TSDB_TABLE_FNAME_LEN] = {0}; strncpy(tbname, p + 1, tbLen); /*tbLen = */ strdequote(tbname); @@ -245,25 +251,46 @@ static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, con return code; } -static int32_t getTableMetaImpl(SInsertParseContext* pCxt, SName* name, char* dbFname, bool isStb) { +static int32_t checkAuth(SInsertParseContext* pCxt, char* pDbFname, bool* pPass) { SParseContext* pBasicCtx = pCxt->pComCxt; + if (NULL != pCxt->pMetaCache) { + return getUserAuthFromCache(pCxt->pMetaCache, pBasicCtx->pUser, pDbFname, AUTH_TYPE_WRITE, pPass); + } + return catalogChkAuth(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pBasicCtx->pUser, pDbFname, + AUTH_TYPE_WRITE, pPass); +} +static int32_t getTableSchema(SInsertParseContext* pCxt, SName* pTbName, bool isStb, STableMeta** pTableMeta) { + SParseContext* pBasicCtx = pCxt->pComCxt; + if (NULL != pCxt->pMetaCache) { + return getTableMetaFromCache(pCxt->pMetaCache, pTbName, pTableMeta); + } + if (isStb) { + return catalogGetSTableMeta(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pTbName, + pTableMeta); + } + return catalogGetTableMeta(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pTbName, pTableMeta); +} + +static int32_t getTableVgroup(SInsertParseContext* pCxt, SName* pTbName, SVgroupInfo* pVg) { + SParseContext* pBasicCtx = pCxt->pComCxt; + if (NULL != pCxt->pMetaCache) { + return getTableVgroupFromCache(pCxt->pMetaCache, pTbName, pVg); + } + return catalogGetTableHashVgroup(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pTbName, pVg); +} + +static int32_t getTableMetaImpl(SInsertParseContext* pCxt, SName* name, char* dbFname, bool isStb) { bool pass = false; - CHECK_CODE(catalogChkAuth(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pBasicCtx->pUser, - dbFname, AUTH_TYPE_WRITE, &pass)); + CHECK_CODE(checkAuth(pCxt, dbFname, &pass)); if (!pass) { return TSDB_CODE_PAR_PERMISSION_DENIED; } - if (isStb) { - CHECK_CODE(catalogGetSTableMeta(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, name, - &pCxt->pTableMeta)); - } else { - CHECK_CODE(catalogGetTableMeta(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, name, - &pCxt->pTableMeta)); - ASSERT(pCxt->pTableMeta->tableInfo.rowSize > 0); + + CHECK_CODE(getTableSchema(pCxt, name, isStb, &pCxt->pTableMeta)); + if (!isStb) { SVgroupInfo vg; - CHECK_CODE( - catalogGetTableHashVgroup(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, name, &vg)); + 
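/*
 * Editorial aside: checkAuth(), getTableSchema() and getTableVgroup()
 * above all apply one dispatch rule -- answer from the SParseMetaCache
 * when a prior syntax-only pass populated it, otherwise fall back to a
 * blocking catalog call. The intended two-pass flow, sketched with a
 * hypothetical resolveParseMetaCache() batch step standing in for
 * whatever the caller uses to fill the reserved slots (that name is not
 * part of this patch; parseInsertSyntax() is added near the end of this
 * file):
 */
static int32_t parseInsertTwoPassSketch(SParseContext* pCxt, SQuery** pQuery) {
  int32_t code = parseInsertSyntax(pCxt, pQuery); /* pass 1: reserve meta keys only */
  if (TSDB_CODE_SUCCESS == code) {
    /* code = resolveParseMetaCache((*pQuery)->pMetaCache); -- one batched catalog fetch */
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = parseInsertSql(pCxt, pQuery); /* pass 2: full parse; the cache branches above now hit */
  }
  return code;
}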
CHECK_CODE(getTableVgroup(pCxt, name, &vg)); CHECK_CODE(taosHashPut(pCxt->pVgroupsHashObj, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg))); } return TSDB_CODE_SUCCESS; @@ -605,7 +632,7 @@ static int32_t parseValueToken(char** end, SToken* pToken, SSchema* pSchema, int case TSDB_DATA_TYPE_BINARY: { // Too long values will raise the invalid sql error message if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { - return buildSyntaxErrMsg(pMsgBuf, "string data overflow", pToken->z); + return generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pSchema->name); } return func(pMsgBuf, pToken->z, pToken->n, param); @@ -647,12 +674,15 @@ static FORCE_INLINE int32_t MemRowAppend(SMsgBuf* pMsgBuf, const void* value, in if (TSDB_DATA_TYPE_BINARY == pa->schema->type) { const char* rowEnd = tdRowEnd(rb->pBuf); STR_WITH_SIZE_TO_VARSTR(rowEnd, value, len); - tdAppendColValToRow(rb, pa->schema->colId, pa->schema->type, TD_VTYPE_NORM, rowEnd, true, pa->toffset, pa->colIdx); + tdAppendColValToRow(rb, pa->schema->colId, pa->schema->type, TD_VTYPE_NORM, rowEnd, false, pa->toffset, pa->colIdx); } else if (TSDB_DATA_TYPE_NCHAR == pa->schema->type) { // if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long' int32_t output = 0; const char* rowEnd = tdRowEnd(rb->pBuf); if (!taosMbsToUcs4(value, len, (TdUcs4*)varDataVal(rowEnd), pa->schema->bytes - VARSTR_HEADER_SIZE, &output)) { + if (errno == E2BIG) { + return generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pa->schema->name); + } char buf[512] = {0}; snprintf(buf, tListLen(buf), "%s", strerror(errno)); return buildSyntaxErrMsg(pMsgBuf, buf, value); @@ -701,7 +731,7 @@ static int32_t parseBoundColumns(SInsertParseContext* pCxt, SParsedDataColInfo* } lastColIdx = index; pColList->cols[index].valStat = VAL_STAT_HAS; - pColList->boundColumns[pColList->numOfBound] = index + PRIMARYKEY_TIMESTAMP_COL_ID; + pColList->boundColumns[pColList->numOfBound] = index; ++pColList->numOfBound; switch (pSchema[t].type) { case TSDB_DATA_TYPE_BINARY: @@ -766,6 +796,10 @@ static int32_t KvRowAppend(SMsgBuf* pMsgBuf, const void* value, int32_t len, voi // if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long' int32_t output = 0; if (!taosMbsToUcs4(value, len, (TdUcs4*)varDataVal(pa->buf), pa->schema->bytes - VARSTR_HEADER_SIZE, &output)) { + if (errno == E2BIG) { + return generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pa->schema->name); + } + char buf[512] = {0}; snprintf(buf, tListLen(buf), " taosMbsToUcs4 error:%s", strerror(errno)); return buildSyntaxErrMsg(pMsgBuf, buf, value); @@ -815,7 +849,7 @@ static int32_t parseTagsClause(SInsertParseContext* pCxt, SSchema* pSchema, uint return buildInvalidOperationMsg(&pCxt->msg, "no mix usage for ? 
and tag values"); } - SSchema* pTagSchema = &pSchema[pCxt->tags.boundColumns[i] - 1]; // colId starts with 1 + SSchema* pTagSchema = &pSchema[pCxt->tags.boundColumns[i]]; param.schema = pTagSchema; CHECK_CODE( parseValueToken(&pCxt->pSql, &sToken, pTagSchema, precision, tmpTokenBuf, KvRowAppend, ¶m, &pCxt->msg)); @@ -845,10 +879,8 @@ static int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst) { static int32_t storeTableMeta(SInsertParseContext* pCxt, SHashObj* pHash, SName* pTableName, const char* pName, int32_t len, STableMeta* pMeta) { - SVgroupInfo vg; - SParseContext* pBasicCtx = pCxt->pComCxt; - CHECK_CODE( - catalogGetTableHashVgroup(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pTableName, &vg)); + SVgroupInfo vg; + CHECK_CODE(getTableVgroup(pCxt, pTableName, &vg)); CHECK_CODE(taosHashPut(pCxt->pVgroupsHashObj, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg))); pMeta->uid = 0; @@ -903,7 +935,7 @@ static int32_t parseUsingClause(SInsertParseContext* pCxt, SName* name, char* tb if (TK_NK_LP != sToken.type) { return buildSyntaxErrMsg(&pCxt->msg, "( is expected", sToken.z); } - CHECK_CODE(parseTagsClause(pCxt, pCxt->pTableMeta->schema, getTableInfo(pCxt->pTableMeta).precision, name->tname)); + CHECK_CODE(parseTagsClause(pCxt, pTagsSchema, getTableInfo(pCxt->pTableMeta).precision, name->tname)); NEXT_VALID_TOKEN(pCxt->pSql, sToken); if (TK_NK_COMMA == sToken.type) { return generateSyntaxErrMsg(&pCxt->msg, TSDB_CODE_PAR_TAGS_NOT_MATCHED); @@ -929,7 +961,7 @@ static int parseOneRow(SInsertParseContext* pCxt, STableDataBlocks* pDataBlocks, // 1. set the parsed value from sql string for (int i = 0; i < spd->numOfBound; ++i) { NEXT_TOKEN_WITH_PREV(pCxt->pSql, sToken); - SSchema* pSchema = &schema[spd->boundColumns[i] - 1]; + SSchema* pSchema = &schema[spd->boundColumns[i]]; if (sToken.type == TK_NK_QUESTION) { isParseBindParam = true; @@ -1070,10 +1102,9 @@ static void destroyInsertParseContext(SInsertParseContext* pCxt) { // VALUES (field1_value, ...) [(field1_value2, ...) ...] 
| FILE csv_file_path // [...]; static int32_t parseInsertBody(SInsertParseContext* pCxt) { - int32_t tbNum = 0; - char tbFName[TSDB_TABLE_FNAME_LEN]; - bool autoCreateTbl = false; - STableMeta* pMeta = NULL; + int32_t tbNum = 0; + char tbFName[TSDB_TABLE_FNAME_LEN]; + bool autoCreateTbl = false; // for each table while (1) { @@ -1088,7 +1119,7 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) { if (sToken.type && pCxt->pSql[0]) { return buildSyntaxErrMsg(&pCxt->msg, "invalid charactor in SQL", sToken.z); } - + if (0 == pCxt->totalNum && (!TSDB_QUERY_HAS_TYPE(pCxt->pOutput->insertType, TSDB_QUERY_TYPE_STMT_INSERT))) { return buildInvalidOperationMsg(&pCxt->msg, "no data in sql"); } @@ -1116,12 +1147,12 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) { NEXT_TOKEN(pCxt->pSql, sToken); SName name; - createSName(&name, &tbnameToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg); - tNameExtractFullName(&name, tbFName); + CHECK_CODE(createSName(&name, &tbnameToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg)); + tNameExtractFullName(&name, tbFName); CHECK_CODE(taosHashPut(pCxt->pTableNameHashObj, tbFName, strlen(tbFName), &name, sizeof(SName))); - // USING cluase + // USING clause if (TK_USING == sToken.type) { CHECK_CODE(parseUsingClause(pCxt, &name, tbFName)); NEXT_TOKEN(pCxt->pSql, sToken); @@ -1136,12 +1167,10 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) { CHECK_CODE(getDataBlockFromList(pCxt->pTableBlockHashObj, tbFName, strlen(tbFName), TSDB_DEFAULT_PAYLOAD_SIZE, sizeof(SSubmitBlk), getTableInfo(pCxt->pTableMeta).rowSize, pCxt->pTableMeta, &dataBuf, NULL, &pCxt->createTblReq)); - pMeta = pCxt->pTableMeta; - pCxt->pTableMeta = NULL; if (TK_NK_LP == sToken.type) { // pSql -> field1_name, ...) - CHECK_CODE(parseBoundColumns(pCxt, &dataBuf->boundColumnInfo, getTableColumnSchema(pMeta))); + CHECK_CODE(parseBoundColumns(pCxt, &dataBuf->boundColumnInfo, getTableColumnSchema(pCxt->pTableMeta))); NEXT_TOKEN(pCxt->pSql, sToken); } @@ -1177,8 +1206,8 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } memcpy(tags, &pCxt->tags, sizeof(pCxt->tags)); - (*pCxt->pStmtCb->setInfoFn)(pCxt->pStmtCb->pStmt, pMeta, tags, tbFName, autoCreateTbl, pCxt->pVgroupsHashObj, - pCxt->pTableBlockHashObj); + (*pCxt->pStmtCb->setInfoFn)(pCxt->pStmtCb->pStmt, pCxt->pTableMeta, tags, tbFName, autoCreateTbl, + pCxt->pVgroupsHashObj, pCxt->pTableBlockHashObj); memset(&pCxt->tags, 0, sizeof(pCxt->tags)); pCxt->pVgroupsHashObj = NULL; @@ -1236,12 +1265,11 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) { if (NULL == *pQuery) { return TSDB_CODE_OUT_OF_MEMORY; } - - (*pQuery)->execMode = QUERY_EXEC_MODE_SCHEDULE; - (*pQuery)->haveResultSet = false; - (*pQuery)->msgType = TDMT_VND_SUBMIT; - (*pQuery)->pRoot = (SNode*)context.pOutput; } + (*pQuery)->execMode = QUERY_EXEC_MODE_SCHEDULE; + (*pQuery)->haveResultSet = false; + (*pQuery)->msgType = TDMT_VND_SUBMIT; + (*pQuery)->pRoot = (SNode*)context.pOutput; if (NULL == (*pQuery)->pTableList) { (*pQuery)->pTableList = taosArrayInit(taosHashGetSize(context.pTableNameHashObj), sizeof(SName)); @@ -1252,7 +1280,7 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) { context.pOutput->payloadType = PAYLOAD_TYPE_KV; - int32_t code = skipInsertInto(&context); + int32_t code = skipInsertInto(&context.pSql, &context.msg); if (TSDB_CODE_SUCCESS == code) { code = parseInsertBody(&context); } @@ -1267,6 +1295,171 @@ int32_t parseInsertSql(SParseContext* 
pContext, SQuery** pQuery) { return code; } +typedef struct SInsertParseSyntaxCxt { + SParseContext* pComCxt; + char* pSql; + SMsgBuf msg; + SParseMetaCache* pMetaCache; +} SInsertParseSyntaxCxt; + +static int32_t skipParentheses(SInsertParseSyntaxCxt* pCxt) { + SToken sToken; + while (1) { + NEXT_TOKEN(pCxt->pSql, sToken); + if (TK_NK_RP == sToken.type) { + break; + } + if (0 == sToken.n) { + return buildSyntaxErrMsg(&pCxt->msg, ") expected", NULL); + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t skipBoundColumns(SInsertParseSyntaxCxt* pCxt) { return skipParentheses(pCxt); } + +// pSql -> (field1_value, ...) [(field1_value2, ...) ...] +static int32_t skipValuesClause(SInsertParseSyntaxCxt* pCxt) { + int32_t numOfRows = 0; + SToken sToken; + while (1) { + int32_t index = 0; + NEXT_TOKEN_KEEP_SQL(pCxt->pSql, sToken, index); + if (TK_NK_LP != sToken.type) { + break; + } + pCxt->pSql += index; + + CHECK_CODE(skipParentheses(pCxt)); + ++numOfRows; + } + if (0 == numOfRows) { + return buildSyntaxErrMsg(&pCxt->msg, "no data points", NULL); + } + return TSDB_CODE_SUCCESS; +} + +static int32_t skipTagsClause(SInsertParseSyntaxCxt* pCxt) { return skipParentheses(pCxt); } + +// pSql -> [(tag1_name, ...)] TAGS (tag1_value, ...) +static int32_t skipUsingClause(SInsertParseSyntaxCxt* pCxt) { + SToken sToken; + NEXT_TOKEN(pCxt->pSql, sToken); + if (TK_NK_LP == sToken.type) { + CHECK_CODE(skipBoundColumns(pCxt)); + NEXT_TOKEN(pCxt->pSql, sToken); + } + + if (TK_TAGS != sToken.type) { + return buildSyntaxErrMsg(&pCxt->msg, "TAGS is expected", sToken.z); + } + // pSql -> (tag1_value, ...) + NEXT_TOKEN(pCxt->pSql, sToken); + if (TK_NK_LP != sToken.type) { + return buildSyntaxErrMsg(&pCxt->msg, "( is expected", sToken.z); + } + CHECK_CODE(skipTagsClause(pCxt)); + + return TSDB_CODE_SUCCESS; +} + +static int32_t collectTableMetaKey(SInsertParseSyntaxCxt* pCxt, SToken* pTbToken) { + SName name; + CHECK_CODE(createSName(&name, pTbToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg)); + CHECK_CODE(reserveUserAuthInCacheExt(pCxt->pComCxt->pUser, &name, AUTH_TYPE_WRITE, pCxt->pMetaCache)); + CHECK_CODE(reserveTableMetaInCacheExt(&name, pCxt->pMetaCache)); + CHECK_CODE(reserveTableVgroupInCacheExt(&name, pCxt->pMetaCache)); + return TSDB_CODE_SUCCESS; +} + +static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) { + bool hasData = false; + // for each table + while (1) { + SToken sToken; + + // pSql -> tb_name ... + NEXT_TOKEN(pCxt->pSql, sToken); + + // no data in the sql string anymore. + if (sToken.n == 0) { + if (sToken.type && pCxt->pSql[0]) { + return buildSyntaxErrMsg(&pCxt->msg, "invalid character in SQL", sToken.z); + } + + if (!hasData) { + return buildInvalidOperationMsg(&pCxt->msg, "no data in sql"); + } + break; + } + + hasData = false; + + SToken tbnameToken = sToken; + NEXT_TOKEN(pCxt->pSql, sToken); + + // USING clause + if (TK_USING == sToken.type) { + NEXT_TOKEN(pCxt->pSql, sToken); + CHECK_CODE(collectTableMetaKey(pCxt, &sToken)); + CHECK_CODE(skipUsingClause(pCxt)); + NEXT_TOKEN(pCxt->pSql, sToken); + } else { + CHECK_CODE(collectTableMetaKey(pCxt, &tbnameToken)); + } + + if (TK_NK_LP == sToken.type) { + // pSql -> field1_name, ...) + CHECK_CODE(skipBoundColumns(pCxt)); + NEXT_TOKEN(pCxt->pSql, sToken); + } + + if (TK_VALUES == sToken.type) { + // pSql -> (field1_value, ...) [(field1_value2, ...) ...]
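/*
 * Editorial caveat: skipParentheses() above breaks on the first TK_NK_RP,
 * so a nested "(" inside a skipped clause would end the scan one token
 * early. If nested parentheses must be tolerated during this syntax-only
 * pass, a depth counter is the usual remedy; a sketch, not part of this
 * patch:
 */
static int32_t skipParenthesesNested(SInsertParseSyntaxCxt* pCxt) {
  SToken  sToken;
  int32_t depth = 1; /* the opening "(" was consumed by the caller */
  while (1) {
    NEXT_TOKEN(pCxt->pSql, sToken);
    if (TK_NK_LP == sToken.type) {
      ++depth;
    } else if (TK_NK_RP == sToken.type && 0 == --depth) {
      break;
    } else if (0 == sToken.n) {
      return buildSyntaxErrMsg(&pCxt->msg, ") expected", NULL);
    }
  }
  return TSDB_CODE_SUCCESS;
}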
+ CHECK_CODE(skipValuesClause(pCxt)); + hasData = true; + continue; + } + + // FILE csv_file_path + if (TK_FILE == sToken.type) { + // pSql -> csv_file_path + NEXT_TOKEN(pCxt->pSql, sToken); + if (0 == sToken.n || (TK_NK_STRING != sToken.type && TK_NK_ID != sToken.type)) { + return buildSyntaxErrMsg(&pCxt->msg, "file path is required following keyword FILE", sToken.z); + } + hasData = true; + continue; + } + + return buildSyntaxErrMsg(&pCxt->msg, "keyword VALUES or FILE is expected", sToken.z); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t parseInsertSyntax(SParseContext* pContext, SQuery** pQuery) { + SInsertParseSyntaxCxt context = {.pComCxt = pContext, + .pSql = (char*)pContext->pSql, + .msg = {.buf = pContext->pMsg, .len = pContext->msgLen}, + .pMetaCache = taosMemoryCalloc(1, sizeof(SParseMetaCache))}; + if (NULL == context.pMetaCache) { + return TSDB_CODE_OUT_OF_MEMORY; + } + int32_t code = skipInsertInto(&context.pSql, &context.msg); + if (TSDB_CODE_SUCCESS == code) { + code = parseInsertBodySyntax(&context); + } + if (TSDB_CODE_SUCCESS == code) { + *pQuery = taosMemoryCalloc(1, sizeof(SQuery)); + if (NULL == *pQuery) { + return TSDB_CODE_OUT_OF_MEMORY; + } + TSWAP((*pQuery)->pMetaCache, context.pMetaCache); + } + return code; +} + int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char* dbName, char* msgBuf, int32_t msgBufLen) { SMsgBuf msg = {.buf = msgBuf, .len = msgBufLen}; @@ -1337,7 +1530,7 @@ int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, char* tN continue; } - SSchema* pTagSchema = &pSchema[tags->boundColumns[c] - 1]; // colId starts with 1 + SSchema* pTagSchema = &pSchema[tags->boundColumns[c]]; param.schema = pTagSchema; int32_t colLen = pTagSchema->bytes; @@ -1384,7 +1577,7 @@ int32_t qBindStmtColsValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBuf, in tdSRowResetBuf(pBuilder, row); for (int c = 0; c < spd->numOfBound; ++c) { - SSchema* pColSchema = &pSchema[spd->boundColumns[c] - 1]; + SSchema* pColSchema = &pSchema[spd->boundColumns[c]]; if (bind[c].num != rowNum) { return buildInvalidOperationMsg(&pBuf, "row number in each bind param should be the same"); @@ -1467,7 +1660,7 @@ int32_t qBindStmtSingleColValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBu tdSRowGetBuf(pBuilder, row); } - SSchema* pColSchema = &pSchema[spd->boundColumns[colIdx] - 1]; + SSchema* pColSchema = &pSchema[spd->boundColumns[colIdx]]; if (bind->num != rowNum) { return buildInvalidOperationMsg(&pBuf, "row number in each bind param should be the same"); @@ -1539,7 +1732,7 @@ int32_t buildBoundFields(SParsedDataColInfo* boundInfo, SSchema* pSchema, int32_ } for (int32_t i = 0; i < boundInfo->numOfBound; ++i) { - SSchema* pTagSchema = &pSchema[boundInfo->boundColumns[i] - 1]; + SSchema* pTagSchema = &pSchema[boundInfo->boundColumns[i]]; strcpy((*fields)[i].name, pTagSchema->name); (*fields)[i].type = pTagSchema->type; (*fields)[i].bytes = pTagSchema->bytes; @@ -1638,7 +1831,7 @@ static int32_t smlBoundColumnData(SArray* cols, SParsedDataColInfo* pColList, SS } lastColIdx = index; pColList->cols[index].valStat = VAL_STAT_HAS; - pColList->boundColumns[pColList->numOfBound] = index + PRIMARYKEY_TIMESTAMP_COL_ID; + pColList->boundColumns[pColList->numOfBound] = index; ++pColList->numOfBound; switch (pSchema[t].type) { case TSDB_DATA_TYPE_BINARY: @@ -1688,7 +1881,7 @@ static int32_t smlBuildTagRow(SArray* cols, SKVRowBuilder* tagsBuilder, SParsedD SKvParam param = {.builder = tagsBuilder}; for (int i = 0; i < tags->numOfBound; ++i) { - SSchema* 
pTagSchema = &pSchema[tags->boundColumns[i] - 1]; // colId starts with 1 + SSchema* pTagSchema = &pSchema[tags->boundColumns[i]]; param.schema = pTagSchema; SSmlKv* kv = taosArrayGetP(cols, i); if (IS_VAR_DATA_TYPE(kv->type)) { @@ -1771,7 +1964,7 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols // 1. set the parsed value from sql string for (int c = 0, j = 0; c < spd->numOfBound; ++c) { - SSchema* pColSchema = &pSchema[spd->boundColumns[c] - 1]; + SSchema* pColSchema = &pSchema[spd->boundColumns[c]]; param.schema = pColSchema; getSTSRowAppendInfo(pBuilder->rowType, spd, c, ¶m.toffset, ¶m.colIdx); diff --git a/source/libs/parser/src/parInsertData.c b/source/libs/parser/src/parInsertData.c index f82c792c96bb9affb839c37c7ee82358e6c84162..1960073f295e278a66eec6e49d8d2b97418a14a5 100644 --- a/source/libs/parser/src/parInsertData.c +++ b/source/libs/parser/src/parInsertData.c @@ -74,7 +74,7 @@ void setBoundColumnInfo(SParsedDataColInfo* pColList, SSchema* pSchema, col_id_t default: break; } - pColList->boundColumns[i] = pSchema[i].colId; + pColList->boundColumns[i] = i; } pColList->allNullLen += pColList->flen; pColList->boundNullLen = pColList->allNullLen; // default set allNullLen diff --git a/source/libs/parser/src/parTokenizer.c b/source/libs/parser/src/parTokenizer.c index 8fb9780f8a5b52c62822c25eb1b52be40d30c1d9..540de2d639be9e69e798316e04bb4a46ff9dd58e 100644 --- a/source/libs/parser/src/parTokenizer.c +++ b/source/libs/parser/src/parTokenizer.c @@ -53,6 +53,7 @@ static SKeyword keywordTable[] = { {"CACHE", TK_CACHE}, {"CACHELAST", TK_CACHELAST}, {"CAST", TK_CAST}, + {"CGROUP", TK_CGROUP}, {"CLUSTER", TK_CLUSTER}, {"COLUMN", TK_COLUMN}, {"COMMENT", TK_COMMENT}, @@ -156,6 +157,7 @@ static SKeyword keywordTable[] = { {"REVOKE", TK_REVOKE}, {"ROLLUP", TK_ROLLUP}, {"SCHEMA", TK_SCHEMA}, + {"SCHEMALESS", TK_SCHEMALESS}, {"SCORES", TK_SCORES}, {"SELECT", TK_SELECT}, {"SESSION", TK_SESSION}, @@ -605,12 +607,12 @@ uint32_t tGetToken(const char* z, uint32_t* tokenId) { } return i; } - case '[': { - for (i = 1; z[i] && z[i - 1] != ']'; i++) { - } - *tokenId = TK_NK_ID; - return i; - } + // case '[': { + // for (i = 1; z[i] && z[i - 1] != ']'; i++) { + // } + // *tokenId = TK_NK_ID; + // return i; + // } case 'T': case 't': case 'F': diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 8e18c267d68f0db99306062f5e0d9a9444ae5bc0..c6a7f95d5e141bf685d706fa4eed60e91d5d0cab 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -40,14 +40,16 @@ typedef struct STranslateContext { SHashObj* pDbs; SHashObj* pTables; SExplainOptions* pExplainOpt; + SParseMetaCache* pMetaCache; } STranslateContext; typedef struct SFullDatabaseName { char fullDbName[TSDB_DB_FNAME_LEN]; } SFullDatabaseName; -static int32_t translateSubquery(STranslateContext* pCxt, SNode* pNode); -static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode); +static int32_t translateSubquery(STranslateContext* pCxt, SNode* pNode); +static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode); +static EDealRes translateValue(STranslateContext* pCxt, SValueNode* pVal); static bool afterGroupBy(ESqlClause clause) { return clause > SQL_CLAUSE_GROUP_BY; } @@ -101,12 +103,17 @@ static int32_t collectUseTable(const SName* pName, SHashObj* pDbs) { static int32_t getTableMetaImpl(STranslateContext* pCxt, const SName* pName, STableMeta** pMeta) { SParseContext* pParCxt = pCxt->pParseCxt; - int32_t code = 
collectUseDatabase(pName, pCxt->pDbs); - if (TSDB_CODE_SUCCESS == code) { - code = collectUseTable(pName, pCxt->pTables); - } - if (TSDB_CODE_SUCCESS == code) { - code = catalogGetTableMeta(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pMeta); + int32_t code = TSDB_CODE_SUCCESS; + if (pParCxt->async) { + code = getTableMetaFromCache(pCxt->pMetaCache, pName, pMeta); + } else { + code = collectUseDatabase(pName, pCxt->pDbs); + if (TSDB_CODE_SUCCESS == code) { + code = collectUseTable(pName, pCxt->pTables); + } + if (TSDB_CODE_SUCCESS == code) { + code = catalogGetTableMeta(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pMeta); + } } if (TSDB_CODE_SUCCESS != code) { parserError("catalogGetTableMeta error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname, @@ -125,8 +132,13 @@ static int32_t refreshGetTableMeta(STranslateContext* pCxt, const char* pDbName, SParseContext* pParCxt = pCxt->pParseCxt; SName name; toName(pCxt->pParseCxt->acctId, pDbName, pTableName, &name); - int32_t code = - catalogRefreshGetTableMeta(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, &name, pMeta, false); + int32_t code = TSDB_CODE_SUCCESS; + if (pParCxt->async) { + code = getTableMetaFromCache(pCxt->pMetaCache, &name, pMeta); + } else { + code = + catalogRefreshGetTableMeta(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, &name, pMeta, false); + } if (TSDB_CODE_SUCCESS != code) { parserError("catalogRefreshGetTableMeta error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pDbName, pTableName); @@ -134,29 +146,18 @@ static int32_t refreshGetTableMeta(STranslateContext* pCxt, const char* pDbName, return code; } -static int32_t getTableDistVgInfo(STranslateContext* pCxt, const SName* pName, SArray** pVgInfo) { - SParseContext* pParCxt = pCxt->pParseCxt; - int32_t code = collectUseDatabase(pName, pCxt->pDbs); - if (TSDB_CODE_SUCCESS == code) { - code = collectUseTable(pName, pCxt->pTables); - } - if (TSDB_CODE_SUCCESS == code) { - code = catalogGetTableDistVgInfo(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pVgInfo); - } - if (TSDB_CODE_SUCCESS != code) { - parserError("catalogGetTableDistVgInfo error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname, - pName->tname); - } - return code; -} - static int32_t getDBVgInfoImpl(STranslateContext* pCxt, const SName* pName, SArray** pVgInfo) { SParseContext* pParCxt = pCxt->pParseCxt; char fullDbName[TSDB_DB_FNAME_LEN]; tNameGetFullDbName(pName, fullDbName); - int32_t code = collectUseDatabaseImpl(fullDbName, pCxt->pDbs); - if (TSDB_CODE_SUCCESS == code) { - code = catalogGetDBVgInfo(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, fullDbName, pVgInfo); + int32_t code = TSDB_CODE_SUCCESS; + if (pParCxt->async) { + code = getDbVgInfoFromCache(pCxt->pMetaCache, fullDbName, pVgInfo); + } else { + code = collectUseDatabaseImpl(fullDbName, pCxt->pDbs); + if (TSDB_CODE_SUCCESS == code) { + code = catalogGetDBVgInfo(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, fullDbName, pVgInfo); + } } if (TSDB_CODE_SUCCESS != code) { parserError("catalogGetDBVgInfo error, code:%s, dbFName:%s", tstrerror(code), fullDbName); @@ -174,12 +175,17 @@ static int32_t getDBVgInfo(STranslateContext* pCxt, const char* pDbName, SArray* static int32_t getTableHashVgroupImpl(STranslateContext* pCxt, const SName* pName, SVgroupInfo* pInfo) { SParseContext* pParCxt = pCxt->pParseCxt; - int32_t code = collectUseDatabase(pName, pCxt->pDbs); - if 
(TSDB_CODE_SUCCESS == code) { - code = collectUseTable(pName, pCxt->pTables); - } - if (TSDB_CODE_SUCCESS == code) { - code = catalogGetTableHashVgroup(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pInfo); + int32_t code = TSDB_CODE_SUCCESS; + if (pParCxt->async) { + code = getTableVgroupFromCache(pCxt->pMetaCache, pName, pInfo); + } else { + code = collectUseDatabase(pName, pCxt->pDbs); + if (TSDB_CODE_SUCCESS == code) { + code = collectUseTable(pName, pCxt->pTables); + } + if (TSDB_CODE_SUCCESS == code) { + code = catalogGetTableHashVgroup(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pInfo); + } } if (TSDB_CODE_SUCCESS != code) { parserError("catalogGetTableHashVgroup error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname, @@ -197,9 +203,14 @@ static int32_t getTableHashVgroup(STranslateContext* pCxt, const char* pDbName, static int32_t getDBVgVersion(STranslateContext* pCxt, const char* pDbFName, int32_t* pVersion, int64_t* pDbId, int32_t* pTableNum) { SParseContext* pParCxt = pCxt->pParseCxt; - int32_t code = collectUseDatabaseImpl(pDbFName, pCxt->pDbs); - if (TSDB_CODE_SUCCESS == code) { - code = catalogGetDBVgVersion(pParCxt->pCatalog, pDbFName, pVersion, pDbId, pTableNum); + int32_t code = TSDB_CODE_SUCCESS; + if (pParCxt->async) { + code = getDbVgVersionFromCache(pCxt->pMetaCache, pDbFName, pVersion, pDbId, pTableNum); + } else { + code = collectUseDatabaseImpl(pDbFName, pCxt->pDbs); + if (TSDB_CODE_SUCCESS == code) { + code = catalogGetDBVgVersion(pParCxt->pCatalog, pDbFName, pVersion, pDbId, pTableNum); + } } if (TSDB_CODE_SUCCESS != code) { parserError("catalogGetDBVgVersion error, code:%s, dbFName:%s", tstrerror(code), pDbFName); @@ -213,9 +224,14 @@ static int32_t getDBCfg(STranslateContext* pCxt, const char* pDbName, SDbCfgInfo tNameSetDbName(&name, pCxt->pParseCxt->acctId, pDbName, strlen(pDbName)); char dbFname[TSDB_DB_FNAME_LEN] = {0}; tNameGetFullDbName(&name, dbFname); - int32_t code = collectUseDatabaseImpl(dbFname, pCxt->pDbs); - if (TSDB_CODE_SUCCESS == code) { - code = catalogGetDBCfg(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, dbFname, pInfo); + int32_t code = TSDB_CODE_SUCCESS; + if (pParCxt->async) { + code = getDbCfgFromCache(pCxt->pMetaCache, dbFname, pInfo); + } else { + code = collectUseDatabaseImpl(dbFname, pCxt->pDbs); + if (TSDB_CODE_SUCCESS == code) { + code = catalogGetDBCfg(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, dbFname, pInfo); + } } if (TSDB_CODE_SUCCESS != code) { parserError("catalogGetDBCfg error, code:%s, dbFName:%s", tstrerror(code), dbFname); @@ -223,7 +239,28 @@ static int32_t getDBCfg(STranslateContext* pCxt, const char* pDbName, SDbCfgInfo return code; } -static int32_t initTranslateContext(SParseContext* pParseCxt, STranslateContext* pCxt) { +static int32_t getUdfInfo(STranslateContext* pCxt, SFunctionNode* pFunc) { + SParseContext* pParCxt = pCxt->pParseCxt; + SFuncInfo funcInfo = {0}; + int32_t code = TSDB_CODE_SUCCESS; + if (pParCxt->async) { + code = getUdfInfoFromCache(pCxt->pMetaCache, pFunc->functionName, &funcInfo); + } else { + code = catalogGetUdfInfo(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pFunc->functionName, + &funcInfo); + } + if (TSDB_CODE_SUCCESS == code) { + pFunc->funcType = FUNCTION_TYPE_UDF; + pFunc->funcId = TSDB_FUNC_TYPE_AGGREGATE == funcInfo.funcType ? 
FUNC_AGGREGATE_UDF_ID : FUNC_SCALAR_UDF_ID; + pFunc->node.resType.type = funcInfo.outputType; + pFunc->node.resType.bytes = funcInfo.outputLen; + pFunc->udfBufSize = funcInfo.bufSize; + tFreeSFuncInfo(&funcInfo); + } + return code; +} + +static int32_t initTranslateContext(SParseContext* pParseCxt, SParseMetaCache* pMetaCache, STranslateContext* pCxt) { pCxt->pParseCxt = pParseCxt; pCxt->errCode = TSDB_CODE_SUCCESS; pCxt->msgBuf.buf = pParseCxt->pMsg; @@ -231,6 +268,7 @@ static int32_t initTranslateContext(SParseContext* pParseCxt, STranslateContext* pCxt->pNsLevel = taosArrayInit(TARRAY_MIN_SIZE, POINTER_BYTES); pCxt->currLevel = 0; pCxt->currClause = 0; + pCxt->pMetaCache = pMetaCache; pCxt->pDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); pCxt->pTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); if (NULL == pCxt->pNsLevel || NULL == pCxt->pDbs || NULL == pCxt->pTables) { @@ -292,8 +330,8 @@ static bool isScanPseudoColumnFunc(const SNode* pNode) { return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsScanPseudoColumnFunc(((SFunctionNode*)pNode)->funcId)); } -static bool isNonstandardSQLFunc(const SNode* pNode) { - return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsNonstandardSQLFunc(((SFunctionNode*)pNode)->funcId)); +static bool isIndefiniteRowsFunc(const SNode* pNode) { + return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsIndefiniteRowsFunc(((SFunctionNode*)pNode)->funcId)); } static bool isDistinctOrderBy(STranslateContext* pCxt) { @@ -342,12 +380,14 @@ static void setColumnInfoBySchema(const SRealTableNode* pTable, const SSchema* p } } -static void setColumnInfoByExpr(const STableNode* pTable, SExprNode* pExpr, SColumnNode* pCol) { +static void setColumnInfoByExpr(const STableNode* pTable, SExprNode* pExpr, SColumnNode** pColRef) { + SColumnNode* pCol = *pColRef; + pCol->pProjectRef = (SNode*)pExpr; if (NULL == pExpr->pAssociation) { pExpr->pAssociation = taosArrayInit(TARRAY_MIN_SIZE, POINTER_BYTES); } - taosArrayPush(pExpr->pAssociation, &pCol); + taosArrayPush(pExpr->pAssociation, &pColRef); if (NULL != pTable) { strcpy(pCol->tableAlias, pTable->tableAlias); } else if (QUERY_NODE_COLUMN == nodeType(pExpr)) { @@ -385,7 +425,7 @@ static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* p if (NULL == pCol) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_OUT_OF_MEMORY); } - setColumnInfoByExpr(pTable, (SExprNode*)pNode, pCol); + setColumnInfoByExpr(pTable, (SExprNode*)pNode, &pCol); nodesListAppend(pList, (SNode*)pCol); } } @@ -425,8 +465,9 @@ static bool isPrimaryKey(STempTableNode* pTable, SNode* pExpr) { return isPrimaryKeyImpl(pTable, pExpr); } -static bool findAndSetColumn(SColumnNode* pCol, const STableNode* pTable) { - bool found = false; +static bool findAndSetColumn(SColumnNode** pColRef, const STableNode* pTable) { + SColumnNode* pCol = *pColRef; + bool found = false; if (QUERY_NODE_REAL_TABLE == nodeType(pTable)) { const STableMeta* pMeta = ((SRealTableNode*)pTable)->pMeta; if (isInternalPrimaryKey(pCol)) { @@ -448,7 +489,7 @@ static bool findAndSetColumn(SColumnNode* pCol, const STableNode* pTable) { SExprNode* pExpr = (SExprNode*)pNode; if (0 == strcmp(pCol->colName, pExpr->aliasName) || (isPrimaryKey((STempTableNode*)pTable, pNode) && isInternalPrimaryKey(pCol))) { - setColumnInfoByExpr(pTable, pExpr, pCol); + setColumnInfoByExpr(pTable, pExpr, pColRef); found = true; break; } @@ -457,36 +498,36 @@ static bool findAndSetColumn(SColumnNode* pCol, 
const STableNode* pTable) { return found; } -static EDealRes translateColumnWithPrefix(STranslateContext* pCxt, SColumnNode* pCol) { +static EDealRes translateColumnWithPrefix(STranslateContext* pCxt, SColumnNode** pCol) { SArray* pTables = taosArrayGetP(pCxt->pNsLevel, pCxt->currLevel); size_t nums = taosArrayGetSize(pTables); bool foundTable = false; for (size_t i = 0; i < nums; ++i) { STableNode* pTable = taosArrayGetP(pTables, i); - if (belongTable(pCxt->pParseCxt->db, pCol, pTable)) { + if (belongTable(pCxt->pParseCxt->db, (*pCol), pTable)) { foundTable = true; if (findAndSetColumn(pCol, pTable)) { break; } - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_COLUMN, pCol->colName); + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_COLUMN, (*pCol)->colName); } } if (!foundTable) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_TABLE_NOT_EXIST, pCol->tableAlias); + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_TABLE_NOT_EXIST, (*pCol)->tableAlias); } return DEAL_RES_CONTINUE; } -static EDealRes translateColumnWithoutPrefix(STranslateContext* pCxt, SColumnNode* pCol) { +static EDealRes translateColumnWithoutPrefix(STranslateContext* pCxt, SColumnNode** pCol) { SArray* pTables = taosArrayGetP(pCxt->pNsLevel, pCxt->currLevel); size_t nums = taosArrayGetSize(pTables); bool found = false; - bool isInternalPk = isInternalPrimaryKey(pCol); + bool isInternalPk = isInternalPrimaryKey(*pCol); for (size_t i = 0; i < nums; ++i) { STableNode* pTable = taosArrayGetP(pTables, i); if (findAndSetColumn(pCol, pTable)) { if (found) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_AMBIGUOUS_COLUMN, pCol->colName); + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_AMBIGUOUS_COLUMN, (*pCol)->colName); } found = true; if (isInternalPk) { @@ -501,18 +542,18 @@ static EDealRes translateColumnWithoutPrefix(STranslateContext* pCxt, SColumnNod } return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_INTERNAL_PK); } else { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_COLUMN, pCol->colName); + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_COLUMN, (*pCol)->colName); } } return DEAL_RES_CONTINUE; } -static bool translateColumnUseAlias(STranslateContext* pCxt, SColumnNode* pCol) { +static bool translateColumnUseAlias(STranslateContext* pCxt, SColumnNode** pCol) { SNodeList* pProjectionList = pCxt->pCurrStmt->pProjectionList; SNode* pNode; FOREACH(pNode, pProjectionList) { SExprNode* pExpr = (SExprNode*)pNode; - if (0 == strcmp(pCol->colName, pExpr->aliasName)) { + if (0 == strcmp((*pCol)->colName, pExpr->aliasName)) { setColumnInfoByExpr(NULL, pExpr, pCol); return true; } @@ -520,14 +561,14 @@ static bool translateColumnUseAlias(STranslateContext* pCxt, SColumnNode* pCol) return false; } -static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode* pCol) { +static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode** pCol) { // count(*)/first(*)/last(*) and so on - if (0 == strcmp(pCol->colName, "*")) { + if (0 == strcmp((*pCol)->colName, "*")) { return DEAL_RES_CONTINUE; } EDealRes res = DEAL_RES_CONTINUE; - if ('\0' != pCol->tableAlias[0]) { + if ('\0' != (*pCol)->tableAlias[0]) { res = translateColumnWithPrefix(pCxt, pCol); } else { bool found = false; @@ -539,17 +580,18 @@ static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode* pCol) { return res; } -static int32_t parseTimeFromValueNode(SValueNode* pVal) { - if (IS_SIGNED_NUMERIC_TYPE(pVal->node.resType.type)) { - return TSDB_CODE_SUCCESS; - } else if 
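
The rewritten parseTimeFromValueNode() introduced just below first runs translateValue() on the literal and then funnels every numeric and boolean representation into the signed datum.i consumed by the timestamp path. A self-contained model of that normalization, with simplified stand-in types:

```c
#include <stdint.h>
#include <stdbool.h>

typedef enum { V_INT, V_UINT, V_DOUBLE, V_BOOL } EValKind;

typedef struct {
  EValKind kind;
  union { int64_t i; uint64_t u; double d; bool b; };
} SVal;

/* Collapse every numeric/bool shape into the signed datum used for
 * timestamps, as the rewritten function does after translateValue(). */
static int64_t toTimestampDatum(SVal* v) {
  switch (v->kind) {
    case V_UINT:   v->i = (int64_t)v->u; break; /* may wrap for values > INT64_MAX */
    case V_DOUBLE: v->i = (int64_t)v->d; break; /* truncates toward zero           */
    case V_BOOL:   v->i = v->b ? 1 : 0;  break;
    case V_INT:    break;                       /* already canonical               */
  }
  v->kind = V_INT;
  return v->i;
}
```
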
(IS_UNSIGNED_NUMERIC_TYPE(pVal->node.resType.type)) { - pVal->datum.i = pVal->datum.u; - return TSDB_CODE_SUCCESS; - } else if (IS_FLOAT_TYPE(pVal->node.resType.type)) { - pVal->datum.i = pVal->datum.d; - return TSDB_CODE_SUCCESS; - } else if (TSDB_DATA_TYPE_BOOL == pVal->node.resType.type) { - pVal->datum.i = pVal->datum.b; +static int32_t parseTimeFromValueNode(STranslateContext* pCxt, SValueNode* pVal) { + if (IS_NUMERIC_TYPE(pVal->node.resType.type) || TSDB_DATA_TYPE_BOOL == pVal->node.resType.type) { + if (DEAL_RES_ERROR == translateValue(pCxt, pVal)) { + return pCxt->errCode; + } + if (IS_UNSIGNED_NUMERIC_TYPE(pVal->node.resType.type)) { + pVal->datum.i = pVal->datum.u; + } else if (IS_FLOAT_TYPE(pVal->node.resType.type)) { + pVal->datum.i = pVal->datum.d; + } else if (TSDB_DATA_TYPE_BOOL == pVal->node.resType.type) { + pVal->datum.i = pVal->datum.b; + } return TSDB_CODE_SUCCESS; } else if (IS_VAR_DATA_TYPE(pVal->node.resType.type) || TSDB_DATA_TYPE_TIMESTAMP == pVal->node.resType.type) { if (TSDB_CODE_SUCCESS == taosParseTime(pVal->literal, &pVal->datum.i, pVal->node.resType.bytes, @@ -585,99 +627,88 @@ static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SD *(bool*)&pVal->typeData = pVal->datum.b; break; case TSDB_DATA_TYPE_TINYINT: { - char* endPtr = NULL; - pVal->datum.i = taosStr2Int64(pVal->literal, &endPtr, 10); + pVal->datum.i = taosStr2Int64(pVal->literal, NULL, 10); *(int8_t*)&pVal->typeData = pVal->datum.i; break; } case TSDB_DATA_TYPE_SMALLINT: { - char* endPtr = NULL; - pVal->datum.i = taosStr2Int64(pVal->literal, &endPtr, 10); + pVal->datum.i = taosStr2Int64(pVal->literal, NULL, 10); *(int16_t*)&pVal->typeData = pVal->datum.i; break; } case TSDB_DATA_TYPE_INT: { - char* endPtr = NULL; - pVal->datum.i = taosStr2Int64(pVal->literal, &endPtr, 10); + pVal->datum.i = taosStr2Int64(pVal->literal, NULL, 10); *(int32_t*)&pVal->typeData = pVal->datum.i; break; } case TSDB_DATA_TYPE_BIGINT: { - char* endPtr = NULL; - pVal->datum.i = taosStr2Int64(pVal->literal, &endPtr, 10); + pVal->datum.i = taosStr2Int64(pVal->literal, NULL, 10); *(int64_t*)&pVal->typeData = pVal->datum.i; break; } case TSDB_DATA_TYPE_UTINYINT: { - char* endPtr = NULL; - pVal->datum.u = taosStr2UInt64(pVal->literal, &endPtr, 10); + pVal->datum.u = taosStr2UInt64(pVal->literal, NULL, 10); *(uint8_t*)&pVal->typeData = pVal->datum.u; break; } case TSDB_DATA_TYPE_USMALLINT: { - char* endPtr = NULL; - pVal->datum.u = taosStr2UInt64(pVal->literal, &endPtr, 10); + pVal->datum.u = taosStr2UInt64(pVal->literal, NULL, 10); *(uint16_t*)&pVal->typeData = pVal->datum.u; break; } case TSDB_DATA_TYPE_UINT: { - char* endPtr = NULL; - pVal->datum.u = taosStr2UInt64(pVal->literal, &endPtr, 10); + pVal->datum.u = taosStr2UInt64(pVal->literal, NULL, 10); *(uint32_t*)&pVal->typeData = pVal->datum.u; break; } case TSDB_DATA_TYPE_UBIGINT: { - char* endPtr = NULL; - pVal->datum.u = taosStr2UInt64(pVal->literal, &endPtr, 10); + pVal->datum.u = taosStr2UInt64(pVal->literal, NULL, 10); *(uint64_t*)&pVal->typeData = pVal->datum.u; break; } case TSDB_DATA_TYPE_FLOAT: { - char* endPtr = NULL; - pVal->datum.d = taosStr2Double(pVal->literal, &endPtr); + pVal->datum.d = taosStr2Double(pVal->literal, NULL); *(float*)&pVal->typeData = pVal->datum.d; break; } case TSDB_DATA_TYPE_DOUBLE: { - char* endPtr = NULL; - pVal->datum.d = taosStr2Double(pVal->literal, &endPtr); + pVal->datum.d = taosStr2Double(pVal->literal, NULL); *(double*)&pVal->typeData = pVal->datum.d; break; } case TSDB_DATA_TYPE_VARCHAR: case 
TSDB_DATA_TYPE_VARBINARY: { - pVal->datum.p = taosMemoryCalloc(1, targetDt.bytes + VARSTR_HEADER_SIZE + 1); + pVal->datum.p = taosMemoryCalloc(1, targetDt.bytes + 1); if (NULL == pVal->datum.p) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_OUT_OF_MEMORY); } - varDataSetLen(pVal->datum.p, targetDt.bytes); - strncpy(varDataVal(pVal->datum.p), pVal->literal, targetDt.bytes); + int32_t len = TMIN(targetDt.bytes - VARSTR_HEADER_SIZE, pVal->node.resType.bytes); + varDataSetLen(pVal->datum.p, len); + strncpy(varDataVal(pVal->datum.p), pVal->literal, len); break; } case TSDB_DATA_TYPE_TIMESTAMP: { - if (TSDB_CODE_SUCCESS != parseTimeFromValueNode(pVal)) { + if (TSDB_CODE_SUCCESS != parseTimeFromValueNode(pCxt, pVal)) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); } *(int64_t*)&pVal->typeData = pVal->datum.i; break; } case TSDB_DATA_TYPE_NCHAR: { - int32_t bytes = targetDt.bytes * TSDB_NCHAR_SIZE; - pVal->datum.p = taosMemoryCalloc(1, bytes + VARSTR_HEADER_SIZE + 1); + pVal->datum.p = taosMemoryCalloc(1, targetDt.bytes + 1); if (NULL == pVal->datum.p) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_OUT_OF_MEMORY); ; } - int32_t output = 0; - if (!taosMbsToUcs4(pVal->literal, pVal->node.resType.bytes, (TdUcs4*)varDataVal(pVal->datum.p), bytes, - &output)) { + int32_t len = 0; + if (!taosMbsToUcs4(pVal->literal, pVal->node.resType.bytes, (TdUcs4*)varDataVal(pVal->datum.p), + targetDt.bytes - VARSTR_HEADER_SIZE, &len)) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); } - varDataSetLen(pVal->datum.p, output); + varDataSetLen(pVal->datum.p, len); break; } - case TSDB_DATA_TYPE_JSON: case TSDB_DATA_TYPE_DECIMAL: case TSDB_DATA_TYPE_BLOB: return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); @@ -690,8 +721,20 @@ static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SD return DEAL_RES_CONTINUE; } +static int32_t calcTypeBytes(SDataType dt) { + if (TSDB_DATA_TYPE_BINARY == dt.type) { + return dt.bytes + VARSTR_HEADER_SIZE; + } else if (TSDB_DATA_TYPE_NCHAR == dt.type) { + return dt.bytes * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE; + } else { + return dt.bytes; + } +} + static EDealRes translateValue(STranslateContext* pCxt, SValueNode* pVal) { - return translateValueImpl(pCxt, pVal, pVal->node.resType); + SDataType dt = pVal->node.resType; + dt.bytes = calcTypeBytes(dt); + return translateValueImpl(pCxt, pVal, dt); } static bool isMultiResFunc(SNode* pNode) { @@ -709,25 +752,36 @@ static bool isMultiResFunc(SNode* pNode) { return (QUERY_NODE_COLUMN == nodeType(pParam) ? 
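
The VARCHAR/NCHAR branches above now allocate targetDt.bytes plus a terminator because calcTypeBytes(), added later in this hunk, folds the var-string header (and the UCS-4 expansion for NCHAR) into the byte count up front. A mirror of the helper; the constants are assumed values of the TDengine macros (2-byte var-data header, 4 bytes per NCHAR rune):

```c
#include <stdint.h>

#define VARSTR_HEADER_SIZE 2 /* assumed: 2-byte length prefix on var-data    */
#define TSDB_NCHAR_SIZE    4 /* assumed: one NCHAR stores a 4-byte UCS-4 rune */

enum { DT_BINARY = 8, DT_NCHAR = 10 }; /* illustrative type codes */

typedef struct { uint8_t type; int32_t bytes; } SDataTypeLite;

static int32_t calcTypeBytesLite(SDataTypeLite dt) {
  if (DT_BINARY == dt.type) {
    return dt.bytes + VARSTR_HEADER_SIZE;                   /* declared chars + header */
  } else if (DT_NCHAR == dt.type) {
    return dt.bytes * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE; /* runes expand to UCS-4   */
  }
  return dt.bytes;                                          /* fixed-width types       */
}
```
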
0 == strcmp(((SColumnNode*)pParam)->colName, "*") : false); } -static EDealRes translateUnaryOperator(STranslateContext* pCxt, SOperatorNode* pOp) { +static int32_t rewriteNegativeOperator(SNode** pOp) { + SNode* pRes = NULL; + int32_t code = scalarCalculateConstants(*pOp, &pRes); + if (TSDB_CODE_SUCCESS == code) { + *pOp = pRes; + } + return code; +} + +static EDealRes translateUnaryOperator(STranslateContext* pCxt, SOperatorNode** pOpRef) { + SOperatorNode* pOp = *pOpRef; if (OP_TYPE_MINUS == pOp->opType) { if (!IS_MATHABLE_TYPE(((SExprNode*)(pOp->pLeft))->resType.type)) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)(pOp->pLeft))->aliasName); } pOp->node.resType.type = TSDB_DATA_TYPE_DOUBLE; pOp->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes; + + pCxt->errCode = rewriteNegativeOperator((SNode**)pOpRef); } else { pOp->node.resType.type = TSDB_DATA_TYPE_BOOL; pOp->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_BOOL].bytes; } - return DEAL_RES_CONTINUE; + return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR; } static EDealRes translateArithmeticOperator(STranslateContext* pCxt, SOperatorNode* pOp) { SDataType ldt = ((SExprNode*)(pOp->pLeft))->resType; SDataType rdt = ((SExprNode*)(pOp->pRight))->resType; - if (TSDB_DATA_TYPE_JSON == ldt.type || TSDB_DATA_TYPE_BLOB == ldt.type || TSDB_DATA_TYPE_JSON == rdt.type || - TSDB_DATA_TYPE_BLOB == rdt.type) { + if (TSDB_DATA_TYPE_BLOB == ldt.type || TSDB_DATA_TYPE_BLOB == rdt.type) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)(pOp->pRight))->aliasName); } if ((TSDB_DATA_TYPE_TIMESTAMP == ldt.type && TSDB_DATA_TYPE_TIMESTAMP == rdt.type) || @@ -752,14 +806,14 @@ static EDealRes translateArithmeticOperator(STranslateContext* pCxt, SOperatorNo static EDealRes translateComparisonOperator(STranslateContext* pCxt, SOperatorNode* pOp) { SDataType ldt = ((SExprNode*)(pOp->pLeft))->resType; SDataType rdt = ((SExprNode*)(pOp->pRight))->resType; - if (TSDB_DATA_TYPE_BLOB == ldt.type || TSDB_DATA_TYPE_JSON == rdt.type || TSDB_DATA_TYPE_BLOB == rdt.type) { + if (TSDB_DATA_TYPE_BLOB == ldt.type || TSDB_DATA_TYPE_BLOB == rdt.type) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)(pOp->pRight))->aliasName); } if (OP_TYPE_IN == pOp->opType || OP_TYPE_NOT_IN == pOp->opType) { ((SExprNode*)pOp->pRight)->resType = ((SExprNode*)pOp->pLeft)->resType; } if (nodesIsRegularOp(pOp)) { - if (!IS_STR_DATA_TYPE(((SExprNode*)(pOp->pLeft))->resType.type)) { + if (!IS_VAR_DATA_TYPE(((SExprNode*)(pOp->pLeft))->resType.type)) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)(pOp->pLeft))->aliasName); } if (QUERY_NODE_VALUE != nodeType(pOp->pRight) || !IS_STR_DATA_TYPE(((SExprNode*)(pOp->pRight))->resType.type)) { @@ -782,7 +836,9 @@ static EDealRes translateJsonOperator(STranslateContext* pCxt, SOperatorNode* pO return DEAL_RES_CONTINUE; } -static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode* pOp) { +static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode** pOpRef) { + SOperatorNode* pOp = *pOpRef; + if (isMultiResFunc(pOp->pLeft)) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)(pOp->pLeft))->aliasName); } @@ -791,7 +847,7 @@ static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode* pOp) { } if (nodesIsUnaryOp(pOp)) { - return translateUnaryOperator(pCxt, pOp); + return translateUnaryOperator(pCxt, pOpRef); } else 
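
rewriteNegativeOperator() above lets the scalar engine fold a constant unary minus at translate time and splices the folded node back through the caller's pointer, which is why translateUnaryOperator() now receives SOperatorNode**. A toy version of the same splice, with scalarCalculateConstants() replaced by a hand-rolled fold:

```c
#include <stdlib.h>

typedef enum { N_VALUE, N_NEG } ENodeKind;
typedef struct SNodeT { ENodeKind kind; double val; struct SNodeT* child; } SNodeT;

/* Fold -(constant) into a constant in place, the same replace-through-a-
 * double-pointer move the hunk makes with the scalar constant folder. */
static int foldNegative(SNodeT** pRef) {
  SNodeT* op = *pRef;
  if (N_NEG == op->kind && op->child && N_VALUE == op->child->kind) {
    SNodeT* folded = malloc(sizeof(SNodeT));
    if (NULL == folded) return -1;
    folded->kind  = N_VALUE;
    folded->val   = -op->child->val;
    folded->child = NULL;
    free(op->child);
    free(op);
    *pRef = folded; /* the parent's slot now holds the folded literal */
  }
  return 0;
}
```
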
if (nodesIsArithmeticOp(pOp)) { return translateArithmeticOperator(pCxt, pOp); } else if (nodesIsComparisonOp(pOp)) { @@ -802,11 +858,11 @@ static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode* pOp) { return DEAL_RES_CONTINUE; } -static EDealRes haveAggOrNonstdFunction(SNode* pNode, void* pContext) { +static EDealRes haveVectorFunction(SNode* pNode, void* pContext) { if (isAggFunc(pNode)) { *((bool*)pContext) = true; return DEAL_RES_END; - } else if (isNonstandardSQLFunc(pNode)) { + } else if (isIndefiniteRowsFunc(pNode)) { *((bool*)pContext) = true; return DEAL_RES_END; } @@ -847,10 +903,67 @@ static int32_t rewriteCountStar(STranslateContext* pCxt, SFunctionNode* pCount) static bool hasInvalidFuncNesting(SNodeList* pParameterList) { bool hasInvalidFunc = false; - nodesWalkExprs(pParameterList, haveAggOrNonstdFunction, &hasInvalidFunc); + nodesWalkExprs(pParameterList, haveVectorFunction, &hasInvalidFunc); return hasInvalidFunc; } +static int32_t getFuncInfo(STranslateContext* pCxt, SFunctionNode* pFunc) { + int32_t code = fmGetFuncInfo(pFunc, pCxt->msgBuf.buf, pCxt->msgBuf.len); + if (TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION == code) { + code = getUdfInfo(pCxt, pFunc); + } + return code; +} + +static int32_t translateAggFunc(STranslateContext* pCxt, SFunctionNode* pFunc) { + if (beforeHaving(pCxt->currClause)) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ILLEGAL_USE_AGG_FUNCTION); + } + if (hasInvalidFuncNesting(pFunc->pParameterList)) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_AGG_FUNC_NESTING); + } + if (pCxt->pCurrStmt->hasIndefiniteRowsFunc) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); + } + + if (isCountStar(pFunc)) { + return rewriteCountStar(pCxt, pFunc); + } + return TSDB_CODE_SUCCESS; +} + +static int32_t translateScanPseudoColumnFunc(STranslateContext* pCxt, SFunctionNode* pFunc) { + if (0 == LIST_LENGTH(pFunc->pParameterList)) { + if (QUERY_NODE_REAL_TABLE != nodeType(pCxt->pCurrStmt->pFromTable)) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TBNAME); + } + } else { + SValueNode* pVal = nodesListGetNode(pFunc->pParameterList, 0); + STableNode* pTable = NULL; + pCxt->errCode = findTable(pCxt, pVal->literal, &pTable); + if (TSDB_CODE_SUCCESS == pCxt->errCode && (NULL == pTable || QUERY_NODE_REAL_TABLE != nodeType(pTable))) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TBNAME); + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t translateIndefiniteRowsFunc(STranslateContext* pCxt, SFunctionNode* pFunc) { + if (SQL_CLAUSE_SELECT != pCxt->currClause || pCxt->pCurrStmt->hasIndefiniteRowsFunc || pCxt->pCurrStmt->hasAggFuncs) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); + } + if (hasInvalidFuncNesting(pFunc->pParameterList)) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_AGG_FUNC_NESTING); + } + return TSDB_CODE_SUCCESS; +} + +static void setFuncClassification(SSelectStmt* pSelect, SFunctionNode* pFunc) { + pSelect->hasAggFuncs = pSelect->hasAggFuncs ? true : fmIsAggFunc(pFunc->funcId); + pSelect->hasRepeatScanFuncs = pSelect->hasRepeatScanFuncs ? true : fmIsRepeatScanFunc(pFunc->funcId); + pSelect->hasIndefiniteRowsFunc = pSelect->hasIndefiniteRowsFunc ? 
true : fmIsIndefiniteRowsFunc(pFunc->funcId); +} + static EDealRes translateFunction(STranslateContext* pCxt, SFunctionNode* pFunc) { SNode* pParam = NULL; FOREACH(pParam, pFunc->pParameterList) { @@ -859,54 +972,18 @@ static EDealRes translateFunction(STranslateContext* pCxt, SFunctionNode* pFunc) } } - SFmGetFuncInfoParam param = {.pCtg = pCxt->pParseCxt->pCatalog, - .pRpc = pCxt->pParseCxt->pTransporter, - .pMgmtEps = &pCxt->pParseCxt->mgmtEpSet, - .pErrBuf = pCxt->msgBuf.buf, - .errBufLen = pCxt->msgBuf.len}; - pCxt->errCode = fmGetFuncInfo(¶m, pFunc); + pCxt->errCode = getFuncInfo(pCxt, pFunc); if (TSDB_CODE_SUCCESS == pCxt->errCode && fmIsAggFunc(pFunc->funcId)) { - if (beforeHaving(pCxt->currClause)) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_ILLEGAL_USE_AGG_FUNCTION); - } - if (hasInvalidFuncNesting(pFunc->pParameterList)) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_AGG_FUNC_NESTING); - } - if (pCxt->pCurrStmt->hasNonstdSQLFunc) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); - } - - pCxt->pCurrStmt->hasAggFuncs = true; - if (isCountStar(pFunc)) { - pCxt->errCode = rewriteCountStar(pCxt, pFunc); - } - - if (fmIsRepeatScanFunc(pFunc->funcId)) { - pCxt->pCurrStmt->hasRepeatScanFuncs = true; - } + pCxt->errCode = translateAggFunc(pCxt, pFunc); } if (TSDB_CODE_SUCCESS == pCxt->errCode && fmIsScanPseudoColumnFunc(pFunc->funcId)) { - if (0 == LIST_LENGTH(pFunc->pParameterList)) { - if (QUERY_NODE_REAL_TABLE != nodeType(pCxt->pCurrStmt->pFromTable)) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_TBNAME); - } - } else { - SValueNode* pVal = nodesListGetNode(pFunc->pParameterList, 0); - STableNode* pTable = NULL; - pCxt->errCode = findTable(pCxt, pVal->literal, &pTable); - if (TSDB_CODE_SUCCESS == pCxt->errCode && (NULL == pTable || QUERY_NODE_REAL_TABLE != nodeType(pTable))) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_TBNAME); - } - } + pCxt->errCode = translateScanPseudoColumnFunc(pCxt, pFunc); } - if (TSDB_CODE_SUCCESS == pCxt->errCode && fmIsNonstandardSQLFunc(pFunc->funcId)) { - if (SQL_CLAUSE_SELECT != pCxt->currClause || pCxt->pCurrStmt->hasNonstdSQLFunc || pCxt->pCurrStmt->hasAggFuncs) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); - } - if (hasInvalidFuncNesting(pFunc->pParameterList)) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_AGG_FUNC_NESTING); - } - pCxt->pCurrStmt->hasNonstdSQLFunc = true; + if (TSDB_CODE_SUCCESS == pCxt->errCode && fmIsIndefiniteRowsFunc(pFunc->funcId)) { + pCxt->errCode = translateIndefiniteRowsFunc(pCxt, pFunc); + } + if (TSDB_CODE_SUCCESS == pCxt->errCode) { + setFuncClassification(pCxt->pCurrStmt, pFunc); } return TSDB_CODE_SUCCESS == pCxt->errCode ? 
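
setFuncClassification() replaces the scattered hasAggFuncs/hasRepeatScanFuncs assignments with one sticky-flag update per translated function. The `flag ? true : fmIsXxx(funcId)` form is just a short-circuit OR, as this reduction shows:

```c
#include <stdbool.h>

typedef struct {
  bool hasAggFuncs;
  bool hasRepeatScanFuncs;
  bool hasIndefiniteRowsFunc;
} SSelectFlagsLite;

/* Once a SELECT has seen one function of a class, the flag stays raised;
 * the classifier only ever ORs new evidence in. */
static void classifyFunc(SSelectFlagsLite* s, bool isAgg, bool isRepeatScan, bool isIndefRows) {
  s->hasAggFuncs           = s->hasAggFuncs           || isAgg;
  s->hasRepeatScanFuncs    = s->hasRepeatScanFuncs    || isRepeatScan;
  s->hasIndefiniteRowsFunc = s->hasIndefiniteRowsFunc || isIndefRows;
}
```
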
DEAL_RES_CONTINUE : DEAL_RES_ERROR; } @@ -921,34 +998,34 @@ static EDealRes translateLogicCond(STranslateContext* pCxt, SLogicConditionNode* return DEAL_RES_CONTINUE; } -static EDealRes doTranslateExpr(SNode* pNode, void* pContext) { +static EDealRes doTranslateExpr(SNode** pNode, void* pContext) { STranslateContext* pCxt = (STranslateContext*)pContext; - switch (nodeType(pNode)) { + switch (nodeType(*pNode)) { case QUERY_NODE_COLUMN: - return translateColumn(pCxt, (SColumnNode*)pNode); + return translateColumn(pCxt, (SColumnNode**)pNode); case QUERY_NODE_VALUE: - return translateValue(pCxt, (SValueNode*)pNode); + return translateValue(pCxt, (SValueNode*)*pNode); case QUERY_NODE_OPERATOR: - return translateOperator(pCxt, (SOperatorNode*)pNode); + return translateOperator(pCxt, (SOperatorNode**)pNode); case QUERY_NODE_FUNCTION: - return translateFunction(pCxt, (SFunctionNode*)pNode); + return translateFunction(pCxt, (SFunctionNode*)*pNode); case QUERY_NODE_LOGIC_CONDITION: - return translateLogicCond(pCxt, (SLogicConditionNode*)pNode); + return translateLogicCond(pCxt, (SLogicConditionNode*)*pNode); case QUERY_NODE_TEMP_TABLE: - return translateExprSubquery(pCxt, ((STempTableNode*)pNode)->pSubquery); + return translateExprSubquery(pCxt, ((STempTableNode*)*pNode)->pSubquery); default: break; } return DEAL_RES_CONTINUE; } -static int32_t translateExpr(STranslateContext* pCxt, SNode* pNode) { - nodesWalkExprPostOrder(pNode, doTranslateExpr, pCxt); +static int32_t translateExpr(STranslateContext* pCxt, SNode** pNode) { + nodesRewriteExprPostOrder(pNode, doTranslateExpr, pCxt); return pCxt->errCode; } static int32_t translateExprList(STranslateContext* pCxt, SNodeList* pList) { - nodesWalkExprsPostOrder(pList, doTranslateExpr, pCxt); + nodesRewriteExprsPostOrder(pList, doTranslateExpr, pCxt); return pCxt->errCode; } @@ -990,10 +1067,11 @@ static EDealRes rewriteColToSelectValFunc(STranslateContext* pCxt, SNode** pNode strcpy(pFunc->node.aliasName, ((SExprNode*)*pNode)->aliasName); pCxt->errCode = nodesListMakeAppend(&pFunc->pParameterList, *pNode); if (TSDB_CODE_SUCCESS == pCxt->errCode) { - translateFunction(pCxt, pFunc); + pCxt->errCode = getFuncInfo(pCxt, pFunc); } if (TSDB_CODE_SUCCESS == pCxt->errCode) { *pNode = (SNode*)pFunc; + pCxt->pCurrStmt->hasSelectValFunc = true; } else { nodesDestroyNode(pFunc); } @@ -1060,7 +1138,7 @@ static int32_t checkExprListForGroupBy(STranslateContext* pCxt, SNodeList* pList } static EDealRes rewriteColsToSelectValFuncImpl(SNode** pNode, void* pContext) { - if (isAggFunc(*pNode)) { + if (isAggFunc(*pNode) || isIndefiniteRowsFunc(*pNode)) { return DEAL_RES_IGNORE_CHILD; } if (isScanPseudoColumnFunc(*pNode) || QUERY_NODE_COLUMN == nodeType(*pNode)) { @@ -1081,7 +1159,7 @@ typedef struct CheckAggColCoexistCxt { STranslateContext* pTranslateCxt; bool existAggFunc; bool existCol; - bool existNonstdFunc; + bool existIndefiniteRowsFunc; int32_t selectFuncNum; bool existOtherAggFunc; } CheckAggColCoexistCxt; @@ -1097,8 +1175,8 @@ static EDealRes doCheckAggColCoexist(SNode* pNode, void* pContext) { pCxt->existAggFunc = true; return DEAL_RES_IGNORE_CHILD; } - if (isNonstandardSQLFunc(pNode)) { - pCxt->existNonstdFunc = true; + if (isIndefiniteRowsFunc(pNode)) { + pCxt->existIndefiniteRowsFunc = true; return DEAL_RES_IGNORE_CHILD; } if (isScanPseudoColumnFunc(pNode) || QUERY_NODE_COLUMN == nodeType(pNode)) { @@ -1114,7 +1192,7 @@ static int32_t checkAggColCoexist(STranslateContext* pCxt, SSelectStmt* pSelect) CheckAggColCoexistCxt cxt = {.pTranslateCxt = pCxt, 
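
The switch from nodesWalkExprPostOrder to nodesRewriteExprPostOrder is the point of this hunk: translateColumn and translateOperator can now replace the node itself, so the traversal must hand each callback the owning slot (SNode**) rather than the node. A minimal post-order rewriter over a toy binary node, showing the shape rather than the real nodes API:

```c
#include <stddef.h>

typedef struct TNode { struct TNode *left, *right; } TNode;
typedef int (*FRewrite)(TNode** slot, void* ctx); /* may replace *slot */

/* Children first, then the node itself, always through the owner's slot
 * so a rewriter can swap the node (column -> projection ref, -x ->
 * folded literal) without the parent holding a dangling pointer. */
static int rewritePostOrder(TNode** slot, FRewrite fn, void* ctx) {
  if (NULL == *slot) return 0;
  if ((*slot)->left && 0 != rewritePostOrder(&(*slot)->left, fn, ctx)) return -1;
  if ((*slot)->right && 0 != rewritePostOrder(&(*slot)->right, fn, ctx)) return -1;
  return fn(slot, ctx);
}
```
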
.existAggFunc = false, .existCol = false, - .existNonstdFunc = false, + .existIndefiniteRowsFunc = false, .selectFuncNum = 0, .existOtherAggFunc = false}; nodesWalkExprs(pSelect->pProjectionList, doCheckAggColCoexist, &cxt); @@ -1127,7 +1205,7 @@ static int32_t checkAggColCoexist(STranslateContext* pCxt, SSelectStmt* pSelect) if ((cxt.selectFuncNum > 1 || cxt.existAggFunc || NULL != pSelect->pWindow) && cxt.existCol) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_SINGLE_GROUP); } - if (cxt.existNonstdFunc && cxt.existCol) { + if (cxt.existIndefiniteRowsFunc && cxt.existCol) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); } return TSDB_CODE_SUCCESS; @@ -1168,7 +1246,6 @@ static int32_t setSysTableVgroupList(STranslateContext* pCxt, SName* pName, SRea int32_t code = TSDB_CODE_SUCCESS; SArray* vgroupList = NULL; if ('\0' != pRealTable->qualDbName[0]) { - // todo release after mnode can be processed if (0 != strcmp(pRealTable->qualDbName, TSDB_INFORMATION_SCHEMA_DB)) { code = getDBVgInfo(pCxt, pRealTable->qualDbName, &vgroupList); } @@ -1176,7 +1253,6 @@ static int32_t setSysTableVgroupList(STranslateContext* pCxt, SName* pName, SRea code = getDBVgInfoImpl(pCxt, pName, &vgroupList); } - // todo release after mnode can be processed if (TSDB_CODE_SUCCESS == code) { code = addMnodeToVgroupList(&pCxt->pParseCxt->mgmtEpSet, &vgroupList); } @@ -1197,7 +1273,7 @@ static int32_t setTableVgroupList(STranslateContext* pCxt, SName* pName, SRealTa int32_t code = TSDB_CODE_SUCCESS; if (TSDB_SUPER_TABLE == pRealTable->pMeta->tableType) { SArray* vgroupList = NULL; - code = getTableDistVgInfo(pCxt, pName, &vgroupList); + code = getDBVgInfoImpl(pCxt, pName, &vgroupList); if (TSDB_CODE_SUCCESS == code) { code = toVgroupsInfo(vgroupList, &pRealTable->pVgroupList); } @@ -1222,12 +1298,31 @@ static uint8_t getStmtPrecision(SNode* pStmt) { return 0; } +static bool stmtIsSingleTable(SNode* pStmt) { + if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) { + return ((STableNode*)((SSelectStmt*)pStmt)->pFromTable)->singleTable; + } + return false; +} + static uint8_t getJoinTablePrecision(SJoinTableNode* pJoinTable) { uint8_t lp = ((STableNode*)pJoinTable->pLeft)->precision; uint8_t rp = ((STableNode*)pJoinTable->pRight)->precision; return (lp > rp ? 
rp : lp); } +static bool joinTableIsSingleTable(SJoinTableNode* pJoinTable) { + return (((STableNode*)pJoinTable->pLeft)->singleTable && ((STableNode*)pJoinTable->pRight)->singleTable); +} + +static bool isSingleTable(SRealTableNode* pRealTable) { + int8_t tableType = pRealTable->pMeta->tableType; + if (TSDB_SYSTEM_TABLE == tableType) { + return 0 != strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_USER_TABLES); + } + return (TSDB_CHILD_TABLE == tableType || TSDB_NORMAL_TABLE == tableType); +} + static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) { int32_t code = TSDB_CODE_SUCCESS; switch (nodeType(pTable)) { @@ -1246,6 +1341,7 @@ static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) { code = setTableVgroupList(pCxt, &name, pRealTable); } pRealTable->table.precision = pRealTable->pMeta->tableInfo.precision; + pRealTable->table.singleTable = isSingleTable(pRealTable); if (TSDB_CODE_SUCCESS == code) { code = addNamespace(pCxt, pRealTable); } @@ -1256,6 +1352,7 @@ static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) { code = translateSubquery(pCxt, pTempTable->pSubquery); if (TSDB_CODE_SUCCESS == code) { pTempTable->table.precision = getStmtPrecision(pTempTable->pSubquery); + pTempTable->table.singleTable = stmtIsSingleTable(pTempTable->pSubquery); code = addNamespace(pCxt, pTempTable); } break; @@ -1268,7 +1365,8 @@ static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) { } if (TSDB_CODE_SUCCESS == code) { pJoinTable->table.precision = getJoinTablePrecision(pJoinTable); - code = translateExpr(pCxt, pJoinTable->pOnCond); + pJoinTable->table.singleTable = joinTableIsSingleTable(pJoinTable); + code = translateExpr(pCxt, &pJoinTable->pOnCond); } break; } @@ -1501,7 +1599,7 @@ static int32_t translateOrderByPosition(STranslateContext* pCxt, SNodeList* pPro if (NULL == pCol) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_OUT_OF_MEMORY); } - setColumnInfoByExpr(NULL, (SExprNode*)nodesListGetNode(pProjectionList, pos - 1), pCol); + setColumnInfoByExpr(NULL, (SExprNode*)nodesListGetNode(pProjectionList, pos - 1), &pCol); ((SOrderByExprNode*)pNode)->pExpr = (SNode*)pCol; nodesDestroyNode(pExpr); } @@ -1547,7 +1645,7 @@ static int32_t translateHaving(STranslateContext* pCxt, SSelectStmt* pSelect) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_GROUPBY_LACK_EXPRESSION); } pCxt->currClause = SQL_CLAUSE_HAVING; - int32_t code = translateExpr(pCxt, pSelect->pHaving); + int32_t code = translateExpr(pCxt, &pSelect->pHaving); if (TSDB_CODE_SUCCESS == code) { code = checkExprForGroupBy(pCxt, &pSelect->pHaving); } @@ -1795,7 +1893,7 @@ static int32_t translateWindow(STranslateContext* pCxt, SSelectStmt* pSelect) { return TSDB_CODE_SUCCESS; } pCxt->currClause = SQL_CLAUSE_WINDOW; - int32_t code = translateExpr(pCxt, pSelect->pWindow); + int32_t code = translateExpr(pCxt, &pSelect->pWindow); if (TSDB_CODE_SUCCESS == code) { code = checkWindow(pCxt, pSelect); } @@ -1807,7 +1905,7 @@ static int32_t translatePartitionBy(STranslateContext* pCxt, SNodeList* pPartiti return translateExprList(pCxt, pPartitionByList); } -static int32_t translateWhere(STranslateContext* pCxt, SNode* pWhere) { +static int32_t translateWhere(STranslateContext* pCxt, SNode** pWhere) { pCxt->currClause = SQL_CLAUSE_WHERE; return translateExpr(pCxt, pWhere); } @@ -1841,7 +1939,7 @@ static int32_t createPrimaryKeyColByTable(STranslateContext* pCxt, STableNode* p } pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID; strcpy(pCol->colName, PK_TS_COL_INTERNAL_NAME); - if 
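
isSingleTable(), joinTableIsSingleTable() and stmtIsSingleTable() above propagate a new singleTable bit up the FROM tree so later phases can plan single-vgroup execution. The real-table decision condensed below, with illustrative table-type codes (the actual ones live in the message headers) and TSDB_INS_TABLE_USER_TABLES shortened to "user_tables":

```c
#include <stdbool.h>
#include <string.h>

enum { TBL_SUPER = 1, TBL_CHILD = 2, TBL_NORMAL = 3, TBL_SYSTEM = 4 }; /* illustrative */

static bool isSingleTableLite(int tableType, const char* tableName) {
  if (TBL_SYSTEM == tableType) {
    /* user_tables is assembled from every vgroup; the other system
     * tables are answered from a single source. */
    return 0 != strcmp(tableName, "user_tables");
  }
  /* A super table fans out over many vgroups, so only child and normal
   * tables count as single. */
  return TBL_CHILD == tableType || TBL_NORMAL == tableType;
}
```
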
(!findAndSetColumn(pCol, pTable)) { + if (!findAndSetColumn(&pCol, pTable)) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TIMELINE_FUNC); } *pPrimaryKey = (SNode*)pCol; @@ -1880,7 +1978,7 @@ static int32_t translateSelect(STranslateContext* pCxt, SSelectStmt* pSelect) { pCxt->pCurrStmt = pSelect; int32_t code = translateFrom(pCxt, pSelect); if (TSDB_CODE_SUCCESS == code) { - code = translateWhere(pCxt, pSelect->pWhere); + code = translateWhere(pCxt, &pSelect->pWhere); } if (TSDB_CODE_SUCCESS == code) { code = translatePartitionBy(pCxt, pSelect->pPartitionByList); @@ -1939,7 +2037,7 @@ static int32_t createCastFunc(STranslateContext* pCxt, SNode* pExpr, SDataType d nodesDestroyNode(pFunc); return TSDB_CODE_OUT_OF_MEMORY; } - if (DEAL_RES_ERROR == translateFunction(pCxt, pFunc)) { + if (TSDB_CODE_SUCCESS != getFuncInfo(pCxt, pFunc)) { nodesClearList(pFunc->pParameterList); pFunc->pParameterList = NULL; nodesDestroyNode(pFunc); @@ -2059,6 +2157,7 @@ static int32_t buildCreateDbReq(STranslateContext* pCxt, SCreateDatabaseStmt* pS pReq->replications = pStmt->pOptions->replica; pReq->strict = pStmt->pOptions->strict; pReq->cacheLastRow = pStmt->pOptions->cachelast; + pReq->schemaless = pStmt->pOptions->schemaless; pReq->ignoreExist = pStmt->ignoreExists; return buildCreateDbRetentions(pStmt->pOptions->pRetentions, pReq); } @@ -2258,6 +2357,9 @@ static int32_t checkDatabaseOptions(STranslateContext* pCxt, const char* pDbName if (TSDB_CODE_SUCCESS == code) { code = checkDbRetentionsOption(pCxt, pOptions->pRetentions); } + if (TSDB_CODE_SUCCESS == code) { + code = checkDbEnumOption(pCxt, "schemaless", pOptions->schemaless, TSDB_DB_SCHEMALESS_ON, TSDB_DB_SCHEMALESS_OFF); + } if (TSDB_CODE_SUCCESS == code) { code = checkOptionsDependency(pCxt, pDbName, pOptions); } @@ -2343,16 +2445,6 @@ static int32_t translateAlterDatabase(STranslateContext* pCxt, SAlterDatabaseStm return buildCmdMsg(pCxt, TDMT_MND_ALTER_DB, (FSerializeFunc)tSerializeSAlterDbReq, &alterReq); } -static int32_t calcTypeBytes(SDataType dt) { - if (TSDB_DATA_TYPE_BINARY == dt.type) { - return dt.bytes + VARSTR_HEADER_SIZE; - } else if (TSDB_DATA_TYPE_NCHAR == dt.type) { - return dt.bytes * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE; - } else { - return dt.bytes; - } -} - static int32_t columnDefNodeToField(SNodeList* pList, SArray** pArray) { *pArray = taosArrayInit(LIST_LENGTH(pList), sizeof(SField)); SNode* pNode; @@ -2478,6 +2570,9 @@ static int32_t checkTableColsSchema(STranslateContext* pCxt, SHashObj* pHash, SN code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_FIRST_COLUMN); } } + if (TSDB_CODE_SUCCESS == code && pCol->dataType.type == TSDB_DATA_TYPE_JSON) { + code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COL_JSON); + } int32_t len = strlen(pCol->colName); if (TSDB_CODE_SUCCESS == code && NULL != taosHashGet(pHash, pCol->colName, len)) { code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_DUPLICATED_COLUMN); @@ -2485,7 +2580,7 @@ static int32_t checkTableColsSchema(STranslateContext* pCxt, SHashObj* pHash, SN if (TSDB_CODE_SUCCESS == code) { if ((TSDB_DATA_TYPE_VARCHAR == pCol->dataType.type && calcTypeBytes(pCol->dataType) > TSDB_MAX_BINARY_LEN) || (TSDB_DATA_TYPE_NCHAR == pCol->dataType.type && calcTypeBytes(pCol->dataType) > TSDB_MAX_NCHAR_LEN)) { - code = code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN); + code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN); } } if (TSDB_CODE_SUCCESS == code) { @@ -2744,12 
+2839,13 @@ static int32_t buildRollupAst(STranslateContext* pCxt, SCreateTableStmt* pStmt, int32_t code = getDBCfg(pCxt, pStmt->dbName, &dbCfg); int32_t num = taosArrayGetSize(dbCfg.pRetensions); if (TSDB_CODE_SUCCESS != code || num < 2) { + taosArrayDestroy(dbCfg.pRetensions); return code; } for (int32_t i = 1; i < num; ++i) { SRetention* pRetension = taosArrayGet(dbCfg.pRetensions, i); STranslateContext cxt = {0}; - initTranslateContext(pCxt->pParseCxt, &cxt); + initTranslateContext(pCxt->pParseCxt, pCxt->pMetaCache, &cxt); code = getRollupAst(&cxt, pStmt, pRetension, dbCfg.precision, 1 == i ? &pReq->pAst1 : &pReq->pAst2, 1 == i ? &pReq->ast1Len : &pReq->ast2Len); destroyTranslateContext(&cxt); @@ -2757,6 +2853,8 @@ static int32_t buildRollupAst(STranslateContext* pCxt, SCreateTableStmt* pStmt, break; } } + + taosArrayDestroy(dbCfg.pRetensions); return code; } @@ -3258,6 +3356,18 @@ static int32_t translateDropTopic(STranslateContext* pCxt, SDropTopicStmt* pStmt return buildCmdMsg(pCxt, TDMT_MND_DROP_TOPIC, (FSerializeFunc)tSerializeSMDropTopicReq, &dropReq); } +static int32_t translateDropCGroup(STranslateContext* pCxt, SDropCGroupStmt* pStmt) { + SMDropCgroupReq dropReq = {0}; + + SName name; + tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->topicName, strlen(pStmt->topicName)); + tNameGetFullDbName(&name, dropReq.topic); + dropReq.igNotExists = pStmt->ignoreNotExists; + strcpy(dropReq.cgroup, pStmt->cgroup); + + return buildCmdMsg(pCxt, TDMT_MND_DROP_CGROUP, (FSerializeFunc)tSerializeSMDropCgroupReq, &dropReq); +} + static int32_t translateAlterLocal(STranslateContext* pCxt, SAlterLocalStmt* pStmt) { // todo return TSDB_CODE_SUCCESS; @@ -3317,7 +3427,9 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* pReq->igExists = pStmt->ignoreExists; SName name; - tNameExtractFullName(toName(pCxt->pParseCxt->acctId, pCxt->pParseCxt->db, pStmt->streamName, &name), pReq->name); + tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->streamName, strlen(pStmt->streamName)); + tNameGetFullDbName(&name, pReq->name); + // tNameExtractFullName(toName(pCxt->pParseCxt->acctId, pCxt->pParseCxt->db, pStmt->streamName, &name), pReq->name); if ('\0' != pStmt->targetTabName[0]) { strcpy(name.dbname, pStmt->targetDbName); @@ -3541,6 +3653,9 @@ static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode) { case QUERY_NODE_DROP_TOPIC_STMT: code = translateDropTopic(pCxt, (SDropTopicStmt*)pNode); break; + case QUERY_NODE_DROP_CGROUP_STMT: + code = translateDropCGroup(pCxt, (SDropCGroupStmt*)pNode); + break; case QUERY_NODE_ALTER_LOCAL_STMT: code = translateAlterLocal(pCxt, (SAlterLocalStmt*)pNode); break; @@ -3692,7 +3807,6 @@ static const char* getSysDbName(ENodeType type) { case QUERY_NODE_SHOW_QNODES_STMT: case QUERY_NODE_SHOW_FUNCTIONS_STMT: case QUERY_NODE_SHOW_INDEXES_STMT: - case QUERY_NODE_SHOW_STREAMS_STMT: case QUERY_NODE_SHOW_BNODES_STMT: case QUERY_NODE_SHOW_SNODES_STMT: case QUERY_NODE_SHOW_LICENCE_STMT: @@ -3701,6 +3815,7 @@ static const char* getSysDbName(ENodeType type) { case QUERY_NODE_SHOW_CONNECTIONS_STMT: case QUERY_NODE_SHOW_QUERIES_STMT: case QUERY_NODE_SHOW_TOPICS_STMT: + case QUERY_NODE_SHOW_STREAMS_STMT: case QUERY_NODE_SHOW_TRANSACTIONS_STMT: return TSDB_PERFORMANCE_SCHEMA_DB; default: @@ -3734,7 +3849,7 @@ static const char* getSysTableName(ENodeType type) { case QUERY_NODE_SHOW_INDEXES_STMT: return TSDB_INS_TABLE_USER_INDEXES; case QUERY_NODE_SHOW_STREAMS_STMT: - return TSDB_INS_TABLE_USER_STREAMS; + return TSDB_PERFS_TABLE_STREAMS; case 
QUERY_NODE_SHOW_BNODES_STMT: return TSDB_INS_TABLE_BNODES; case QUERY_NODE_SHOW_SNODES_STMT: @@ -3877,7 +3992,7 @@ typedef struct SVgroupCreateTableBatch { static void destroyCreateTbReq(SVCreateTbReq* pReq) { taosMemoryFreeClear(pReq->name); - taosMemoryFreeClear(pReq->ntb.schema.pSchema); + taosMemoryFreeClear(pReq->ntb.schemaRow.pSchema); } static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt* pStmt, const SVgroupInfo* pVgroupInfo, @@ -3890,10 +4005,10 @@ static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt* SVCreateTbReq req = {0}; req.type = TD_NORMAL_TABLE; req.name = strdup(pStmt->tableName); - req.ntb.schema.nCols = LIST_LENGTH(pStmt->pCols); - req.ntb.schema.sver = 1; - req.ntb.schema.pSchema = taosMemoryCalloc(req.ntb.schema.nCols, sizeof(SSchema)); - if (NULL == req.name || NULL == req.ntb.schema.pSchema) { + req.ntb.schemaRow.nCols = LIST_LENGTH(pStmt->pCols); + req.ntb.schemaRow.version = 1; + req.ntb.schemaRow.pSchema = taosMemoryCalloc(req.ntb.schemaRow.nCols, sizeof(SSchema)); + if (NULL == req.name || NULL == req.ntb.schemaRow.pSchema) { destroyCreateTbReq(&req); return TSDB_CODE_OUT_OF_MEMORY; } @@ -3903,7 +4018,7 @@ static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt* SNode* pCol; col_id_t index = 0; FOREACH(pCol, pStmt->pCols) { - toSchema((SColumnDefNode*)pCol, index + 1, req.ntb.schema.pSchema + index); + toSchema((SColumnDefNode*)pCol, index + 1, req.ntb.schemaRow.pSchema + index); ++index; } pBatch->info = *pVgroupInfo; @@ -3957,7 +4072,7 @@ static void destroyCreateTbReqBatch(SVgroupCreateTableBatch* pTbBatch) { taosMemoryFreeClear(pTableReq->name); if (pTableReq->type == TSDB_NORMAL_TABLE) { - taosMemoryFreeClear(pTableReq->ntb.schema.pSchema); + taosMemoryFreeClear(pTableReq->ntb.schemaRow.pSchema); } else if (pTableReq->type == TSDB_CHILD_TABLE) { taosMemoryFreeClear(pTableReq->ctb.pTag); } @@ -4071,9 +4186,7 @@ static int32_t addValToKVRow(STranslateContext* pCxt, SValueNode* pVal, const SS return parseJsontoTagData(pVal->literal, pBuilder, &pCxt->msgBuf, pSchema->colId); } - if (pVal->node.resType.type == TSDB_DATA_TYPE_NULL) { - // todo - } else { + if (pVal->node.resType.type != TSDB_DATA_TYPE_NULL) { tdAddColToKVRow(pBuilder, pSchema->colId, nodesGetValueFromNode(pVal), IS_VAR_DATA_TYPE(pSchema->type) ? 
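
addValToKVRow() keeps the long-standing size rule visible in the ternary at this point: var-data tags contribute their length prefix plus payload (varDataTLen), fixed-width tags contribute exactly TYPE_BYTES[type]. A standalone equivalent, with the header size assumed to be 2 bytes:

```c
#include <stdint.h>
#include <stdbool.h>
#include <string.h>

#define VARSTR_HEADER_SIZE 2 /* assumed: 2-byte length prefix */

/* varDataTLen() equivalent: header + payload length read from the
 * prefix; fixed-width types just report their declared size. */
static int32_t tagValBytes(bool isVarType, const char* pVarData, int32_t fixedBytes) {
  if (isVarType) {
    uint16_t payload = 0;
    memcpy(&payload, pVarData, sizeof(payload)); /* read the length prefix */
    return VARSTR_HEADER_SIZE + payload;
  }
  return fixedBytes;
}
```
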
varDataTLen(pVal->datum.p) : TYPE_BYTES[pSchema->type]); } @@ -4082,32 +4195,24 @@ static int32_t addValToKVRow(STranslateContext* pCxt, SValueNode* pVal, const SS } static int32_t createValueFromFunction(STranslateContext* pCxt, SFunctionNode* pFunc, SValueNode** pVal) { - if (DEAL_RES_ERROR == translateFunction(pCxt, pFunc)) { - return pCxt->errCode; - } - return scalarCalculateConstants((SNode*)pFunc, (SNode**)pVal); -} - -static int32_t colDataBytesToValueDataBytes(uint8_t type, int32_t bytes) { - if (TSDB_DATA_TYPE_VARCHAR == type || TSDB_DATA_TYPE_BINARY == type || TSDB_DATA_TYPE_VARBINARY == type) { - return bytes - VARSTR_HEADER_SIZE; - } else if (TSDB_DATA_TYPE_NCHAR == type) { - return (bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; + int32_t code = getFuncInfo(pCxt, pFunc); + if (TSDB_CODE_SUCCESS == code) { + code = scalarCalculateConstants((SNode*)pFunc, (SNode**)pVal); } - return bytes; + return code; } -static SDataType schemaToDataType(SSchema* pSchema) { - SDataType dt = {.type = pSchema->type, .bytes = pSchema->bytes, .precision = 0, .scale = 0}; - dt.bytes = colDataBytesToValueDataBytes(pSchema->type, pSchema->bytes); +static SDataType schemaToDataType(uint8_t precision, SSchema* pSchema) { + SDataType dt = {.type = pSchema->type, .bytes = pSchema->bytes, .precision = precision, .scale = 0}; return dt; } -static int32_t translateTagVal(STranslateContext* pCxt, SSchema* pSchema, SNode* pNode, SValueNode** pVal) { +static int32_t translateTagVal(STranslateContext* pCxt, uint8_t precision, SSchema* pSchema, SNode* pNode, + SValueNode** pVal) { if (QUERY_NODE_FUNCTION == nodeType(pNode)) { return createValueFromFunction(pCxt, (SFunctionNode*)pNode, pVal); } else if (QUERY_NODE_VALUE == nodeType(pNode)) { - return (DEAL_RES_ERROR == translateValueImpl(pCxt, (SValueNode*)pNode, schemaToDataType(pSchema)) + return (DEAL_RES_ERROR == translateValueImpl(pCxt, (SValueNode*)pNode, schemaToDataType(precision, pSchema)) ? pCxt->errCode : TSDB_CODE_SUCCESS); } else { @@ -4138,7 +4243,7 @@ static int32_t buildKVRowForBindTags(STranslateContext* pCxt, SCreateSubTableCla return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAG_NAME, pCol->colName); } SValueNode* pVal = NULL; - int32_t code = translateTagVal(pCxt, pSchema, pNode, &pVal); + int32_t code = translateTagVal(pCxt, pSuperTableMeta->tableInfo.precision, pSchema, pNode, &pVal); if (TSDB_CODE_SUCCESS == code) { if (NULL == pVal) { pVal = (SValueNode*)pNode; @@ -4168,7 +4273,7 @@ static int32_t buildKVRowForAllTags(STranslateContext* pCxt, SCreateSubTableClau int32_t index = 0; FOREACH(pNode, pStmt->pValsOfTags) { SValueNode* pVal = NULL; - int32_t code = translateTagVal(pCxt, pTagSchema + index, pNode, &pVal); + int32_t code = translateTagVal(pCxt, pSuperTableMeta->tableInfo.precision, pTagSchema + index, pNode, &pVal); if (TSDB_CODE_SUCCESS == code) { if (NULL == pVal) { pVal = (SValueNode*)pNode; @@ -4448,14 +4553,45 @@ static int32_t buildUpdateTagValReq(STranslateContext* pCxt, SAlterTableStmt* pS return TSDB_CODE_OUT_OF_MEMORY; } - if (DEAL_RES_ERROR == translateValueImpl(pCxt, pStmt->pVal, schemaToDataType(pSchema))) { + if (DEAL_RES_ERROR == + translateValueImpl(pCxt, pStmt->pVal, schemaToDataType(pTableMeta->tableInfo.precision, pSchema))) { return pCxt->errCode; } pReq->isNull = (TSDB_DATA_TYPE_NULL == pStmt->pVal->node.resType.type); - pReq->nTagVal = pStmt->pVal->node.resType.bytes; - char* pVal = nodesGetValueFromNode(pStmt->pVal); - pReq->pTagVal = IS_VAR_DATA_TYPE(pStmt->pVal->node.resType.type) ? 
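
schemaToDataType() in this hunk now takes the table's precision instead of recomputing value byte widths, so a timestamp tag literal is parsed at the owning super table's unit (buildKVRowForBindTags and buildUpdateTagValReq both pass tableInfo.precision). The whole change fits in a few lines; the field layout here is simplified:

```c
#include <stdint.h>

/* Precision codes assumed to follow TDengine's convention (0=ms, 1=us, 2=ns). */
typedef struct { uint8_t type; int32_t bytes; uint8_t precision; uint8_t scale; } SDataTypeLite;
typedef struct { uint8_t type; int32_t bytes; } SSchemaLite;

/* The one line that matters: precision rides along with the schema type,
 * so '2022-01-01 00:00:00.000000001' lands on the right tick. */
static SDataTypeLite schemaToDataTypeLite(uint8_t precision, const SSchemaLite* pSchema) {
  SDataTypeLite dt = { pSchema->type, pSchema->bytes, precision, 0 };
  return dt;
}
```
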
pVal + VARSTR_HEADER_SIZE : pVal; + if (pStmt->pVal->node.resType.type == TSDB_DATA_TYPE_JSON) { + SKVRowBuilder kvRowBuilder = {0}; + int32_t code = tdInitKVRowBuilder(&kvRowBuilder); + + if (TSDB_CODE_SUCCESS != code) { + return TSDB_CODE_OUT_OF_MEMORY; + } + if (pStmt->pVal->literal && + strlen(pStmt->pVal->literal) > (TSDB_MAX_JSON_TAG_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) { + return buildSyntaxErrMsg(&pCxt->msgBuf, "json string too long, more than 4095", pStmt->pVal->literal); + } + + code = parseJsontoTagData(pStmt->pVal->literal, &kvRowBuilder, &pCxt->msgBuf, pSchema->colId); + if (TSDB_CODE_SUCCESS != code) { + return code; + } + + SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder); + if (NULL == row) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return TSDB_CODE_OUT_OF_MEMORY; + } + pReq->nTagVal = kvRowLen(row); + pReq->pTagVal = row; + pStmt->pVal->datum.p = row; // for free + tdDestroyKVRowBuilder(&kvRowBuilder); + } else { + pReq->nTagVal = pStmt->pVal->node.resType.bytes; + if (TSDB_DATA_TYPE_NCHAR == pStmt->pVal->node.resType.type) { + pReq->nTagVal = pReq->nTagVal * TSDB_NCHAR_SIZE; + } + pReq->pTagVal = nodesGetValueFromNode(pStmt->pVal); + } return TSDB_CODE_SUCCESS; } @@ -4479,6 +4615,9 @@ static int32_t buildAddColReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, S static int32_t buildDropColReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta, SVAlterTbReq* pReq) { + if (2 == getNumOfColumns(pTableMeta)) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DROP_COL); + } SSchema* pSchema = getColSchema(pTableMeta, pStmt->colName); if (NULL == pSchema) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMN, pStmt->colName); } @@ -4649,7 +4788,26 @@ static int32_t rewriteAlterTable(STranslateContext* pCxt, SQuery* pQuery) { return code; } + if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG); + } + + if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_COLUMN) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COL_JSON); + } + + if (getNumOfTags(pTableMeta) == 1 && pStmt->alterType == TSDB_ALTER_TABLE_DROP_TAG) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, + "can not drop tag if there is only one tag"); + } if (TSDB_SUPER_TABLE == pTableMeta->tableType) { + SSchema* pTagsSchema = getTableTagSchema(pTableMeta); + if (getNumOfTags(pTableMeta) == 1 && pTagsSchema->type == TSDB_DATA_TYPE_JSON && + (pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG || pStmt->alterType == TSDB_ALTER_TABLE_DROP_TAG || + pStmt->alterType == TSDB_ALTER_TABLE_UPDATE_TAG_BYTES)) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG); + } return TSDB_CODE_SUCCESS; } else if (TSDB_CHILD_TABLE != pTableMeta->tableType && TSDB_NORMAL_TABLE != pTableMeta->tableType) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE); @@ -4783,7 +4941,7 @@ static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery) { int32_t translate(SParseContext* pParseCxt, SQuery* pQuery) { STranslateContext cxt = {0}; - int32_t code = initTranslateContext(pParseCxt, &cxt); + int32_t code = initTranslateContext(pParseCxt, pQuery->pMetaCache, &cxt); if (TSDB_CODE_SUCCESS == code) { code = fmFuncMgtInit(); } diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c index 
fe21915b1ae100948ab2b485d799456aafbda639..9882440bbb632e2d989da9f8a5f2be880bb37eab 100644 --- a/source/libs/parser/src/parUtil.c +++ b/source/libs/parser/src/parUtil.c @@ -15,6 +15,9 @@ #include "parUtil.h" #include "cJSON.h" +#include "querynodes.h" + +#define USER_AUTH_KEY_MAX_LEN TSDB_USER_LEN + TSDB_DB_FNAME_LEN + 2 static char* getSyntaxErrFormat(int32_t errCode) { switch (errCode) { @@ -169,6 +172,12 @@ static char* getSyntaxErrFormat(int32_t errCode) { "And, cannot be mixed with other non scalar functions or columns."; case TSDB_CODE_PAR_NOT_ALLOWED_WIN_QUERY: return "Window query not supported, since the result of subquery not include valid timestamp column"; + case TSDB_CODE_PAR_INVALID_DROP_COL: + return "No columns can be dropped"; + case TSDB_CODE_PAR_INVALID_COL_JSON: + return "Only tag can be json type"; + case TSDB_CODE_PAR_VALUE_TOO_LONG: + return "Value too long for column/tag: %s"; case TSDB_CODE_OUT_OF_MEMORY: return "Out of memory"; default: @@ -249,17 +258,8 @@ STableComInfo getTableInfo(const STableMeta* pTableMeta) { return pTableMeta->tableInfo; } -static uint32_t getTableMetaSize(const STableMeta* pTableMeta) { - int32_t totalCols = 0; - if (pTableMeta->tableInfo.numOfColumns >= 0) { - totalCols = pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; - } - - return sizeof(STableMeta) + totalCols * sizeof(SSchema); -} - STableMeta* tableMetaDup(const STableMeta* pTableMeta) { - size_t size = getTableMetaSize(pTableMeta); + size_t size = TABLE_META_SIZE(pTableMeta); STableMeta* p = taosMemoryMalloc(size); memcpy(p, pTableMeta, size); @@ -322,11 +322,11 @@ static bool isValidateTag(char* input) { return true; } -int parseJsontoTagData(const char* json, SKVRowBuilder* kvRowBuilder, SMsgBuf* pMsgBuf, int16_t startColId) { +int32_t parseJsontoTagData(const char* json, SKVRowBuilder* kvRowBuilder, SMsgBuf* pMsgBuf, int16_t startColId) { // set json NULL data uint8_t jsonNULL = TSDB_DATA_TYPE_NULL; - int jsonIndex = startColId + 1; - if (!json || strcasecmp(json, TSDB_DATA_NULL_STR_L) == 0) { + int32_t jsonIndex = startColId + 1; + if (!json || strtrim((char*)json) == 0 || strcasecmp(json, TSDB_DATA_NULL_STR_L) == 0) { tdAddColToKVRow(kvRowBuilder, jsonIndex, &jsonNULL, CHAR_BYTES); return TSDB_CODE_SUCCESS; } @@ -337,15 +337,15 @@ int parseJsontoTagData(const char* json, SKVRowBuilder* kvRowBuilder, SMsgBuf* p return buildSyntaxErrMsg(pMsgBuf, "json parse error", json); } - int size = cJSON_GetArraySize(root); + int32_t size = cJSON_GetArraySize(root); if (!cJSON_IsObject(root)) { return buildSyntaxErrMsg(pMsgBuf, "json error invalide value", json); } - int retCode = 0; + int32_t retCode = 0; char* tagKV = NULL; SHashObj* keyHash = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, false); - for (int i = 0; i < size; i++) { + for (int32_t i = 0; i < size; i++) { cJSON* item = cJSON_GetArrayItem(root, i); if (!item) { qError("json inner error:%d", i); @@ -358,12 +358,12 @@ int parseJsontoTagData(const char* json, SKVRowBuilder* kvRowBuilder, SMsgBuf* p retCode = buildSyntaxErrMsg(pMsgBuf, "json key not validate", jsonKey); goto end; } - // if(strlen(jsonKey) > TSDB_MAX_JSON_KEY_LEN){ - // tscError("json key too long error"); - // retCode = tscSQLSyntaxErrMsg(errMsg, "json key too long, more than 256", NULL); - // goto end; - // } size_t keyLen = strlen(jsonKey); + if (keyLen > TSDB_MAX_JSON_KEY_LEN) { + qError("json key too long error"); + retCode = buildSyntaxErrMsg(pMsgBuf, "json key too long, more than 256", jsonKey); + goto end; + 
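
The re-enabled key guard above distinguishes a hard error (key longer than TSDB_MAX_JSON_KEY_LEN, goto end) from keys that are merely skipped (empty or already seen, continue). A sketch of that three-way decision; the 256 limit is the value the error string suggests and is assumed here:

```c
#include <stdbool.h>
#include <string.h>

#define MAX_JSON_KEY_LEN 256 /* assumed value of TSDB_MAX_JSON_KEY_LEN */

/* 0 = usable key, 1 = skip silently (empty or duplicate), -1 = hard
 * error (too long), mirroring continue vs goto end in the patched loop. */
static int checkJsonKey(const char* key, bool alreadySeen) {
  size_t n = strlen(key);
  if (n > MAX_JSON_KEY_LEN) return -1;
  if (0 == n || alreadySeen) return 1;
  return 0;
}
```
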
} if (keyLen == 0 || taosHashGet(keyHash, jsonKey, keyLen) != NULL) { continue; } @@ -441,4 +441,410 @@ end: taosHashCleanup(keyHash); cJSON_Delete(root); return retCode; -} \ No newline at end of file +} + +static int32_t userAuthToString(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type, char* pStr) { + return sprintf(pStr, "%s*%d.%s*%d", pUser, acctId, pDb, type); +} + +static int32_t userAuthToStringExt(const char* pUser, const char* pDbFName, AUTH_TYPE type, char* pStr) { + return sprintf(pStr, "%s*%s*%d", pUser, pDbFName, type); +} + +static void stringToUserAuth(const char* pStr, int32_t len, SUserAuthInfo* pUserAuth) { + char* p1 = strchr(pStr, '*'); + strncpy(pUserAuth->user, pStr, p1 - pStr); + ++p1; + char* p2 = strchr(p1, '*'); + strncpy(pUserAuth->dbFName, p1, p2 - p1); + ++p2; + char buf[10] = {0}; + strncpy(buf, p2, len - (p2 - pStr)); + pUserAuth->type = taosStr2Int32(buf, NULL, 10); +} + +static int32_t buildTableReq(SHashObj* pTablesHash, SArray** pTables) { + if (NULL != pTablesHash) { + *pTables = taosArrayInit(taosHashGetSize(pTablesHash), sizeof(SName)); + if (NULL == *pTables) { + return TSDB_CODE_OUT_OF_MEMORY; + } + void* p = taosHashIterate(pTablesHash, NULL); + while (NULL != p) { + size_t len = 0; + char* pKey = taosHashGetKey(p, &len); + char fullName[TSDB_TABLE_FNAME_LEN] = {0}; + strncpy(fullName, pKey, len); + SName name = {0}; + tNameFromString(&name, fullName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); + taosArrayPush(*pTables, &name); + p = taosHashIterate(pTablesHash, p); + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t buildDbReq(SHashObj* pDbsHash, SArray** pDbs) { + if (NULL != pDbsHash) { + *pDbs = taosArrayInit(taosHashGetSize(pDbsHash), TSDB_DB_FNAME_LEN); + if (NULL == *pDbs) { + return TSDB_CODE_OUT_OF_MEMORY; + } + void* p = taosHashIterate(pDbsHash, NULL); + while (NULL != p) { + size_t len = 0; + char* pKey = taosHashGetKey(p, &len); + char fullName[TSDB_DB_FNAME_LEN] = {0}; + strncpy(fullName, pKey, len); + taosArrayPush(*pDbs, fullName); + p = taosHashIterate(pDbsHash, p); + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t buildTableMetaReq(SHashObj* pTableMetaHash, SArray** pTableMeta) { + return buildTableReq(pTableMetaHash, pTableMeta); +} + +static int32_t buildDbVgroupReq(SHashObj* pDbVgroupHash, SArray** pDbVgroup) { + return buildDbReq(pDbVgroupHash, pDbVgroup); +} + +static int32_t buildTableVgroupReq(SHashObj* pTableVgroupHash, SArray** pTableVgroup) { + return buildTableReq(pTableVgroupHash, pTableVgroup); +} + +static int32_t buildDbCfgReq(SHashObj* pDbCfgHash, SArray** pDbCfg) { return buildDbReq(pDbCfgHash, pDbCfg); } + +static int32_t buildUserAuthReq(SHashObj* pUserAuthHash, SArray** pUserAuth) { + if (NULL != pUserAuthHash) { + *pUserAuth = taosArrayInit(taosHashGetSize(pUserAuthHash), sizeof(SUserAuthInfo)); + if (NULL == *pUserAuth) { + return TSDB_CODE_OUT_OF_MEMORY; + } + void* p = taosHashIterate(pUserAuthHash, NULL); + while (NULL != p) { + size_t len = 0; + char* pKey = taosHashGetKey(p, &len); + SUserAuthInfo userAuth = {0}; + stringToUserAuth(pKey, len, &userAuth); + taosArrayPush(*pUserAuth, &userAuth); + p = taosHashIterate(pUserAuthHash, p); + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t buildUdfReq(SHashObj* pUdfHash, SArray** pUdf) { + if (NULL != pUdfHash) { + *pUdf = taosArrayInit(taosHashGetSize(pUdfHash), TSDB_FUNC_NAME_LEN); + if (NULL == *pUdf) { + return TSDB_CODE_OUT_OF_MEMORY; + } + void* p = taosHashIterate(pUdfHash, NULL); + while (NULL != p) { + size_t len = 0; 
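
The user-auth cache key added in this file is a flat string, `user*dbFName*type` (userAuthToStringExt), and stringToUserAuth() walks it back with two strchr calls, which quietly assumes neither the user name nor the db name contains '*'. A self-contained round trip with illustrative buffer sizes (the real code bounds them with TSDB_USER_LEN, TSDB_DB_FNAME_LEN and USER_AUTH_KEY_MAX_LEN):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { char user[24]; char dbFName[64]; int32_t type; } SAuthKeyLite;

static int serializeAuth(const SAuthKeyLite* a, char* buf) {
  return sprintf(buf, "%s*%s*%d", a->user, a->dbFName, a->type);
}

static void deserializeAuth(const char* s, SAuthKeyLite* a) {
  memset(a, 0, sizeof(*a));              /* zero first so memcpy leaves NUL terminators */
  const char* p1 = strchr(s, '*');       /* end of user                                 */
  const char* p2 = strchr(p1 + 1, '*');  /* end of dbFName                              */
  memcpy(a->user, s, (size_t)(p1 - s));
  memcpy(a->dbFName, p1 + 1, (size_t)(p2 - p1 - 1));
  a->type = atoi(p2 + 1);
}
```
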
+ char* pFunc = taosHashGetKey(p, &len); + char func[TSDB_FUNC_NAME_LEN] = {0}; + strncpy(func, pFunc, len); + taosArrayPush(*pUdf, func); + p = taosHashIterate(pUdfHash, p); + } + } + return TSDB_CODE_SUCCESS; +} + +int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) { + int32_t code = buildTableMetaReq(pMetaCache->pTableMeta, &pCatalogReq->pTableMeta); + if (TSDB_CODE_SUCCESS == code) { + code = buildDbVgroupReq(pMetaCache->pDbVgroup, &pCatalogReq->pDbVgroup); + } + if (TSDB_CODE_SUCCESS == code) { + code = buildTableVgroupReq(pMetaCache->pTableVgroup, &pCatalogReq->pTableHash); + } + if (TSDB_CODE_SUCCESS == code) { + code = buildDbCfgReq(pMetaCache->pDbCfg, &pCatalogReq->pDbCfg); + } + if (TSDB_CODE_SUCCESS == code) { + code = buildUserAuthReq(pMetaCache->pUserAuth, &pCatalogReq->pUser); + } + if (TSDB_CODE_SUCCESS == code) { + code = buildUdfReq(pMetaCache->pUdf, &pCatalogReq->pUdf); + } + return code; +} + +static int32_t putTableMetaToCache(const SArray* pTableMetaReq, const SArray* pTableMetaData, SHashObj* pTableMeta) { + int32_t ntables = taosArrayGetSize(pTableMetaReq); + for (int32_t i = 0; i < ntables; ++i) { + char fullName[TSDB_TABLE_FNAME_LEN]; + tNameExtractFullName(taosArrayGet(pTableMetaReq, i), fullName); + if (TSDB_CODE_SUCCESS != + taosHashPut(pTableMeta, fullName, strlen(fullName), taosArrayGet(pTableMetaData, i), POINTER_BYTES)) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t putDbVgroupToCache(const SArray* pDbVgroupReq, const SArray* pDbVgroupData, SHashObj* pDbVgroup) { + int32_t nvgs = taosArrayGetSize(pDbVgroupReq); + for (int32_t i = 0; i < nvgs; ++i) { + char* pDbFName = taosArrayGet(pDbVgroupReq, i); + if (TSDB_CODE_SUCCESS != + taosHashPut(pDbVgroup, pDbFName, strlen(pDbFName), taosArrayGet(pDbVgroupData, i), POINTER_BYTES)) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t putTableVgroupToCache(const SArray* pTableVgroupReq, const SArray* pTableVgroupData, + SHashObj* pTableVgroup) { + int32_t ntables = taosArrayGetSize(pTableVgroupReq); + for (int32_t i = 0; i < ntables; ++i) { + char fullName[TSDB_TABLE_FNAME_LEN]; + tNameExtractFullName(taosArrayGet(pTableVgroupReq, i), fullName); + SVgroupInfo* pInfo = taosArrayGet(pTableVgroupData, i); + if (TSDB_CODE_SUCCESS != taosHashPut(pTableVgroup, fullName, strlen(fullName), &pInfo, POINTER_BYTES)) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t putDbCfgToCache(const SArray* pDbCfgReq, const SArray* pDbCfgData, SHashObj* pDbCfg) { + int32_t nvgs = taosArrayGetSize(pDbCfgReq); + for (int32_t i = 0; i < nvgs; ++i) { + char* pDbFName = taosArrayGet(pDbCfgReq, i); + SDbCfgInfo* pInfo = taosArrayGet(pDbCfgData, i); + if (TSDB_CODE_SUCCESS != taosHashPut(pDbCfg, pDbFName, strlen(pDbFName), &pInfo, POINTER_BYTES)) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t putUserAuthToCache(const SArray* pUserAuthReq, const SArray* pUserAuthData, SHashObj* pUserAuth) { + int32_t nvgs = taosArrayGetSize(pUserAuthReq); + for (int32_t i = 0; i < nvgs; ++i) { + SUserAuthInfo* pUser = taosArrayGet(pUserAuthReq, i); + char key[USER_AUTH_KEY_MAX_LEN] = {0}; + int32_t len = userAuthToStringExt(pUser->user, pUser->dbFName, pUser->type, key); + if (TSDB_CODE_SUCCESS != taosHashPut(pUserAuth, key, len, taosArrayGet(pUserAuthData, i), sizeof(bool))) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + return TSDB_CODE_SUCCESS; +} + +static 
+static int32_t putTableMetaToCache(const SArray* pTableMetaReq, const SArray* pTableMetaData, SHashObj* pTableMeta) {
+  int32_t ntables = taosArrayGetSize(pTableMetaReq);
+  for (int32_t i = 0; i < ntables; ++i) {
+    char fullName[TSDB_TABLE_FNAME_LEN];
+    tNameExtractFullName(taosArrayGet(pTableMetaReq, i), fullName);
+    if (TSDB_CODE_SUCCESS !=
+        taosHashPut(pTableMeta, fullName, strlen(fullName), taosArrayGet(pTableMetaData, i), POINTER_BYTES)) {
+      return TSDB_CODE_OUT_OF_MEMORY;
+    }
+  }
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t putDbVgroupToCache(const SArray* pDbVgroupReq, const SArray* pDbVgroupData, SHashObj* pDbVgroup) {
+  int32_t nvgs = taosArrayGetSize(pDbVgroupReq);
+  for (int32_t i = 0; i < nvgs; ++i) {
+    char* pDbFName = taosArrayGet(pDbVgroupReq, i);
+    if (TSDB_CODE_SUCCESS !=
+        taosHashPut(pDbVgroup, pDbFName, strlen(pDbFName), taosArrayGet(pDbVgroupData, i), POINTER_BYTES)) {
+      return TSDB_CODE_OUT_OF_MEMORY;
+    }
+  }
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t putTableVgroupToCache(const SArray* pTableVgroupReq, const SArray* pTableVgroupData,
+                                     SHashObj* pTableVgroup) {
+  int32_t ntables = taosArrayGetSize(pTableVgroupReq);
+  for (int32_t i = 0; i < ntables; ++i) {
+    char fullName[TSDB_TABLE_FNAME_LEN];
+    tNameExtractFullName(taosArrayGet(pTableVgroupReq, i), fullName);
+    SVgroupInfo* pInfo = taosArrayGet(pTableVgroupData, i);
+    if (TSDB_CODE_SUCCESS != taosHashPut(pTableVgroup, fullName, strlen(fullName), &pInfo, POINTER_BYTES)) {
+      return TSDB_CODE_OUT_OF_MEMORY;
+    }
+  }
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t putDbCfgToCache(const SArray* pDbCfgReq, const SArray* pDbCfgData, SHashObj* pDbCfg) {
+  int32_t nvgs = taosArrayGetSize(pDbCfgReq);
+  for (int32_t i = 0; i < nvgs; ++i) {
+    char*       pDbFName = taosArrayGet(pDbCfgReq, i);
+    SDbCfgInfo* pInfo = taosArrayGet(pDbCfgData, i);
+    if (TSDB_CODE_SUCCESS != taosHashPut(pDbCfg, pDbFName, strlen(pDbFName), &pInfo, POINTER_BYTES)) {
+      return TSDB_CODE_OUT_OF_MEMORY;
+    }
+  }
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t putUserAuthToCache(const SArray* pUserAuthReq, const SArray* pUserAuthData, SHashObj* pUserAuth) {
+  int32_t nvgs = taosArrayGetSize(pUserAuthReq);
+  for (int32_t i = 0; i < nvgs; ++i) {
+    SUserAuthInfo* pUser = taosArrayGet(pUserAuthReq, i);
+    char           key[USER_AUTH_KEY_MAX_LEN] = {0};
+    int32_t        len = userAuthToStringExt(pUser->user, pUser->dbFName, pUser->type, key);
+    if (TSDB_CODE_SUCCESS != taosHashPut(pUserAuth, key, len, taosArrayGet(pUserAuthData, i), sizeof(bool))) {
+      return TSDB_CODE_OUT_OF_MEMORY;
+    }
+  }
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t putUdfToCache(const SArray* pUdfReq, const SArray* pUdfData, SHashObj* pUdf) {
+  int32_t num = taosArrayGetSize(pUdfReq);
+  for (int32_t i = 0; i < num; ++i) {
+    char*      pFunc = taosArrayGet(pUdfReq, i);
+    SFuncInfo* pInfo = taosArrayGet(pUdfData, i);
+    if (TSDB_CODE_SUCCESS != taosHashPut(pUdf, pFunc, strlen(pFunc), &pInfo, POINTER_BYTES)) {
+      return TSDB_CODE_OUT_OF_MEMORY;
+    }
+  }
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache) {
+  int32_t code = putTableMetaToCache(pCatalogReq->pTableMeta, pMetaData->pTableMeta, pMetaCache->pTableMeta);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = putDbVgroupToCache(pCatalogReq->pDbVgroup, pMetaData->pDbVgroup, pMetaCache->pDbVgroup);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = putTableVgroupToCache(pCatalogReq->pTableHash, pMetaData->pTableHash, pMetaCache->pTableVgroup);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = putDbCfgToCache(pCatalogReq->pDbCfg, pMetaData->pDbCfg, pMetaCache->pDbCfg);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = putUserAuthToCache(pCatalogReq->pUser, pMetaData->pUser, pMetaCache->pUserAuth);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = putUdfToCache(pCatalogReq->pUdf, pMetaData->pUdfList, pMetaCache->pUdf);
+  }
+  return code;
+}
+
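The reserve* helpers that follow use each hash table as a set of names to fetch: only the keys matter, and the value handed to taosHashPut (&pTables, &pMetaCache, a local bool) is a throwaway placeholder that buildCatalogReq never reads. A minimal sketch of the idiom, using a hypothetical reserveThing that is not part of the patch:

    // The hash behaves as a set; the stored value is never read back.
    static int32_t reserveThing(const char* pName, SHashObj** pSet) {
      if (NULL == *pSet) {
        *pSet = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
        if (NULL == *pSet) {
          return TSDB_CODE_OUT_OF_MEMORY;
        }
      }
      return taosHashPut(*pSet, pName, strlen(pName), &pSet, POINTER_BYTES);  // dummy value
    }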
+static int32_t reserveTableReqInCacheImpl(const char* pTbFName, int32_t len, SHashObj** pTables) {
+  if (NULL == *pTables) {
+    *pTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+    if (NULL == *pTables) {
+      return TSDB_CODE_OUT_OF_MEMORY;
+    }
+  }
+  return taosHashPut(*pTables, pTbFName, len, &pTables, POINTER_BYTES);
+}
+
+static int32_t reserveTableReqInCache(int32_t acctId, const char* pDb, const char* pTable, SHashObj** pTables) {
+  char    fullName[TSDB_TABLE_FNAME_LEN];
+  int32_t len = snprintf(fullName, sizeof(fullName), "%d.%s.%s", acctId, pDb, pTable);
+  return reserveTableReqInCacheImpl(fullName, len, pTables);
+}
+
+int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache) {
+  return reserveTableReqInCache(acctId, pDb, pTable, &pMetaCache->pTableMeta);
+}
+
+int32_t reserveTableMetaInCacheExt(const SName* pName, SParseMetaCache* pMetaCache) {
+  char fullName[TSDB_TABLE_FNAME_LEN];
+  tNameExtractFullName(pName, fullName);
+  return reserveTableReqInCacheImpl(fullName, strlen(fullName), &pMetaCache->pTableMeta);
+}
+
+int32_t getTableMetaFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableMeta** pMeta) {
+  char fullName[TSDB_TABLE_FNAME_LEN];
+  tNameExtractFullName(pName, fullName);
+  STableMeta** pRes = taosHashGet(pMetaCache->pTableMeta, fullName, strlen(fullName));
+  if (NULL == pRes || NULL == *pRes) {
+    return TSDB_CODE_PAR_INTERNAL_ERROR;
+  }
+  *pMeta = tableMetaDup(*pRes);
+  if (NULL == *pMeta) {
+    return TSDB_CODE_OUT_OF_MEMORY;
+  }
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t reserveDbReqInCache(int32_t acctId, const char* pDb, SHashObj** pDbs) {
+  if (NULL == *pDbs) {
+    *pDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+    if (NULL == *pDbs) {
+      return TSDB_CODE_OUT_OF_MEMORY;
+    }
+  }
+  char    fullName[TSDB_TABLE_FNAME_LEN];
+  int32_t len = snprintf(fullName, sizeof(fullName), "%d.%s", acctId, pDb);
+  return taosHashPut(*pDbs, fullName, len, &pDbs, POINTER_BYTES);
+}
+
+int32_t reserveDbVgInfoInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache) {
+  return reserveDbReqInCache(acctId, pDb, &pMetaCache->pDbVgroup);
+}
+
+int32_t getDbVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo) {
+  SArray** pRes = taosHashGet(pMetaCache->pDbVgroup, pDbFName, strlen(pDbFName));
+  if (NULL == pRes) {
+    return TSDB_CODE_PAR_INTERNAL_ERROR;
+  }
+  // *pRes is null, which is a legal value, indicating that the user DB has not been created
+  if (NULL != *pRes) {
+    *pVgInfo = taosArrayDup(*pRes);
+    if (NULL == *pVgInfo) {
+      return TSDB_CODE_OUT_OF_MEMORY;
+    }
+  }
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t reserveTableVgroupInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache) {
+  return reserveTableReqInCache(acctId, pDb, pTable, &pMetaCache->pTableVgroup);
+}
+
+int32_t reserveTableVgroupInCacheExt(const SName* pName, SParseMetaCache* pMetaCache) {
+  char fullName[TSDB_TABLE_FNAME_LEN];
+  tNameExtractFullName(pName, fullName);
+  return reserveTableReqInCacheImpl(fullName, strlen(fullName), &pMetaCache->pTableVgroup);
+}
+
+int32_t getTableVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup) {
+  char fullName[TSDB_TABLE_FNAME_LEN];
+  tNameExtractFullName(pName, fullName);
+  SVgroupInfo** pRes = taosHashGet(pMetaCache->pTableVgroup, fullName, strlen(fullName));
+  if (NULL == pRes || NULL == *pRes) {
+    return TSDB_CODE_PAR_INTERNAL_ERROR;
+  }
+  memcpy(pVgroup, *pRes, sizeof(SVgroupInfo));
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t reserveDbVgVersionInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache) {
+  return reserveDbReqInCache(acctId, pDb, &pMetaCache->pDbCfg);
+}
+
+int32_t getDbVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId,
+                                int32_t* pTableNum) {
+  SDbInfo** pRes = taosHashGet(pMetaCache->pDbCfg, pDbFName, strlen(pDbFName));
+  if (NULL == pRes || NULL == *pRes) {
+    return TSDB_CODE_PAR_INTERNAL_ERROR;
+  }
+  *pVersion = (*pRes)->vgVer;
+  *pDbId = (*pRes)->dbId;
+  *pTableNum = (*pRes)->tbNum;
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t reserveDbCfgInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache) {
+  return reserveDbReqInCache(acctId, pDb, &pMetaCache->pDbCfg);
+}
+
+int32_t getDbCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo) {
+  SDbCfgInfo** pRes = taosHashGet(pMetaCache->pDbCfg, pDbFName, strlen(pDbFName));
+  if (NULL == pRes || NULL == *pRes) {
+    return TSDB_CODE_PAR_INTERNAL_ERROR;
+  }
+  memcpy(pInfo, *pRes, sizeof(SDbCfgInfo));
+  return TSDB_CODE_SUCCESS;
+}
+
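The user-auth functions below funnel both reserve variants and the lookup through the same stringified key, so a reservation made from (acctId, user, db) must stringify identically to a lookup made from (user, dbFName). A minimal roundtrip sketch with invented literal values, assuming dbFName is the "acctId.dbname" form produced by tNameGetFullDbName and that AUTH_TYPE_READ is a member of AUTH_TYPE:

    // syntax phase: remember that this user's read permission on this db is needed
    reserveUserAuthInCache(1, "root", "test_db", AUTH_TYPE_READ, pMetaCache);
    // ... one catalog round trip fills pMetaCache->pUserAuth via putUserAuthToCache ...
    // semantic phase: read the cached verdict back with the matching key
    bool    pass = false;
    int32_t code = getUserAuthFromCache(pMetaCache, "root", "1.test_db", AUTH_TYPE_READ, &pass);

A miss in any get*FromCache means the reserve phase failed to register a name the semantic phase asked for, which is why it is reported as TSDB_CODE_PAR_INTERNAL_ERROR rather than as a user error.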
+static int32_t reserveUserAuthInCacheImpl(const char* pKey, int32_t len, SParseMetaCache* pMetaCache) {
+  if (NULL == pMetaCache->pUserAuth) {
+    pMetaCache->pUserAuth = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+    if (NULL == pMetaCache->pUserAuth) {
+      return TSDB_CODE_OUT_OF_MEMORY;
+    }
+  }
+  bool pass = false;
+  return taosHashPut(pMetaCache->pUserAuth, pKey, len, &pass, sizeof(pass));
+}
+
+int32_t reserveUserAuthInCache(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type,
+                               SParseMetaCache* pMetaCache) {
+  char    key[USER_AUTH_KEY_MAX_LEN] = {0};
+  int32_t len = userAuthToString(acctId, pUser, pDb, type, key);
+  return reserveUserAuthInCacheImpl(key, len, pMetaCache);
+}
+
+int32_t reserveUserAuthInCacheExt(const char* pUser, const SName* pName, AUTH_TYPE type, SParseMetaCache* pMetaCache) {
+  char dbFName[TSDB_DB_FNAME_LEN] = {0};
+  tNameGetFullDbName(pName, dbFName);
+  char    key[USER_AUTH_KEY_MAX_LEN] = {0};
+  int32_t len = userAuthToStringExt(pUser, dbFName, type, key);
+  return reserveUserAuthInCacheImpl(key, len, pMetaCache);
+}
+
+int32_t getUserAuthFromCache(SParseMetaCache* pMetaCache, const char* pUser, const char* pDbFName, AUTH_TYPE type,
+                             bool* pPass) {
+  char    key[USER_AUTH_KEY_MAX_LEN] = {0};
+  int32_t len = userAuthToStringExt(pUser, pDbFName, type, key);
+  bool*   pRes = taosHashGet(pMetaCache->pUserAuth, key, len);
+  if (NULL == pRes) {
+    return TSDB_CODE_PAR_INTERNAL_ERROR;
+  }
+  *pPass = *pRes;
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t reserveUdfInCache(const char* pFunc, SParseMetaCache* pMetaCache) {
+  if (NULL == pMetaCache->pUdf) {
+    pMetaCache->pUdf = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+    if (NULL == pMetaCache->pUdf) {
+      return TSDB_CODE_OUT_OF_MEMORY;
+    }
+  }
+  return taosHashPut(pMetaCache->pUdf, pFunc, strlen(pFunc), &pMetaCache, POINTER_BYTES);
+}
+
+int32_t getUdfInfoFromCache(SParseMetaCache* pMetaCache, const char* pFunc, SFuncInfo* pInfo) {
+  SFuncInfo** pRes = taosHashGet(pMetaCache->pUdf, pFunc, strlen(pFunc));
+  if (NULL == pRes || NULL == *pRes) {
+    return TSDB_CODE_PAR_INTERNAL_ERROR;
+  }
+  memcpy(pInfo, *pRes, sizeof(SFuncInfo));
+  return TSDB_CODE_SUCCESS;
+}
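The parser.c changes below split parsing into a syntax pass and a semantic pass so a caller can batch all catalog lookups into one round trip in between. A minimal sketch of the intended call sequence, assuming a hypothetical fetchAllMeta helper that fills SMetaData from an SCatalogReq:

    SQuery*     pQuery = NULL;
    SCatalogReq catalogReq = {0};
    SMetaData   metaData = {0};
    // pass 1: pure syntax; records every table/db/user/udf name the statement mentions
    int32_t code = qParseSqlSyntax(pCxt, &pQuery, &catalogReq);
    if (TSDB_CODE_SUCCESS == code) {
      // one batched catalog round trip (hypothetical fetch helper)
      code = fetchAllMeta(&catalogReq, &metaData);
    }
    if (TSDB_CODE_SUCCESS == code) {
      // pass 2: authenticate and translate against the cached answers
      code = qAnalyseSqlSemantic(pCxt, &catalogReq, &metaData, pQuery);
    }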
diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c
index 688e20063a4f02f3b077b116e1b702c428562c71..bb70458f983832533fe8fa18ab58b58ca38558a6 100644
--- a/source/libs/parser/src/parser.c
+++ b/source/libs/parser/src/parser.c
@@ -19,7 +19,7 @@
 #include "parInt.h"
 #include "parToken.h"
 
-bool isInsertSql(const char* pStr, size_t length) {
+bool qIsInsertSql(const char* pStr, size_t length) {
   if (NULL == pStr) {
     return false;
   }
@@ -34,22 +34,35 @@
   } while (1);
 }
 
-static int32_t parseSqlIntoAst(SParseContext* pCxt, SQuery** pQuery) {
-  int32_t code = parse(pCxt, pQuery);
-  if (TSDB_CODE_SUCCESS == code) {
-    code = authenticate(pCxt, *pQuery);
-  }
+static int32_t analyseSemantic(SParseContext* pCxt, SQuery* pQuery) {
+  int32_t code = authenticate(pCxt, pQuery);
 
-  if (TSDB_CODE_SUCCESS == code && (*pQuery)->placeholderNum > 0) {
-    TSWAP((*pQuery)->pPrepareRoot, (*pQuery)->pRoot);
+  if (TSDB_CODE_SUCCESS == code && pQuery->placeholderNum > 0) {
+    TSWAP(pQuery->pPrepareRoot, pQuery->pRoot);
     return TSDB_CODE_SUCCESS;
   }
 
   if (TSDB_CODE_SUCCESS == code) {
-    code = translate(pCxt, *pQuery);
+    code = translate(pCxt, pQuery);
   }
   if (TSDB_CODE_SUCCESS == code) {
-    code = calculateConstant(pCxt, *pQuery);
+    code = calculateConstant(pCxt, pQuery);
+  }
+  return code;
+}
+
+static int32_t parseSqlIntoAst(SParseContext* pCxt, SQuery** pQuery) {
+  int32_t code = parse(pCxt, pQuery);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = analyseSemantic(pCxt, *pQuery);
+  }
+  return code;
+}
+
+static int32_t parseSqlSyntax(SParseContext* pCxt, SQuery** pQuery) {
+  int32_t code = parse(pCxt, pQuery);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = collectMetaKey(pCxt, *pQuery);
   }
   return code;
 }
@@ -93,6 +106,7 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
       }
       varDataSetLen(pVal->datum.p, pVal->node.resType.bytes);
       strncpy(varDataVal(pVal->datum.p), (const char*)pParam->buffer, pVal->node.resType.bytes);
+      pVal->node.resType.bytes += VARSTR_HEADER_SIZE;
       break;
     case TSDB_DATA_TYPE_NCHAR: {
      pVal->node.resType.bytes *= TSDB_NCHAR_SIZE;
@@ -107,7 +121,7 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
         return errno;
       }
       varDataSetLen(pVal->datum.p, output);
-      pVal->node.resType.bytes = output;
+      pVal->node.resType.bytes = output + VARSTR_HEADER_SIZE;
       break;
     }
     case TSDB_DATA_TYPE_TIMESTAMP:
@@ -169,7 +183,7 @@ static void rewriteExprAlias(SNode* pRoot) {
 
 int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery) {
   int32_t code = TSDB_CODE_SUCCESS;
-  if (isInsertSql(pCxt->pSql, pCxt->sqlLen)) {
+  if (qIsInsertSql(pCxt->pSql, pCxt->sqlLen)) {
     code = parseInsertSql(pCxt, pQuery);
   } else {
     code = parseSqlIntoAst(pCxt, pQuery);
@@ -178,12 +192,47 @@ int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery) {
   return code;
 }
 
+int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq* pCatalogReq) {
+  int32_t code = TSDB_CODE_SUCCESS;
+  if (qIsInsertSql(pCxt->pSql, pCxt->sqlLen)) {
+    code = parseInsertSyntax(pCxt, pQuery);
+  } else {
+    code = parseSqlSyntax(pCxt, pQuery);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = buildCatalogReq((*pQuery)->pMetaCache, pCatalogReq);
+  }
+  terrno = code;
+  return code;
+}
+
+int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq,
+                            const struct SMetaData* pMetaData, SQuery* pQuery) {
+  int32_t code = putMetaDataToCache(pCatalogReq, pMetaData, pQuery->pMetaCache);
+  if (NULL == pQuery->pRoot) {
+    return parseInsertSql(pCxt, &pQuery);
+  }
+  return analyseSemantic(pCxt, pQuery);
+}
+
 void qDestroyQuery(SQuery* pQueryNode) { nodesDestroyNode(pQueryNode); }
 
 int32_t qExtractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema) {
   return extractResultSchema(pRoot, numOfCols, pSchema);
 }
 
+int32_t qSetSTableIdForRSma(SNode* pStmt, int64_t uid) {
+  if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
+    SNode* pTable = ((SSelectStmt*)pStmt)->pFromTable;
+    if (QUERY_NODE_REAL_TABLE == nodeType(pTable)) {
+      ((SRealTableNode*)pTable)->pMeta->uid = uid;
+      ((SRealTableNode*)pTable)->pMeta->suid = uid;
+      return TSDB_CODE_SUCCESS;
+    }
+  }
+  return TSDB_CODE_FAILED;
+}
+
 int32_t qStmtBindParams(SQuery* pQuery, TAOS_MULTI_BIND* pParams, int32_t colIdx) {
   int32_t code = TSDB_CODE_SUCCESS;
 
diff --git a/source/libs/parser/src/sql.c b/source/libs/parser/src/sql.c
index 0854bb83e471151e83efd066192ee576561b28ee..262abac54bbd1c1ea9847c05507bb13fdedb0462 100644
--- a/source/libs/parser/src/sql.c
+++ b/source/libs/parser/src/sql.c
@@ -100,25 +100,25 @@
 #endif
 /************* Begin control #defines *****************************************/
 #define YYCODETYPE unsigned short int
-#define YYNOCODE 358
+#define YYNOCODE 361
 #define YYACTIONTYPE unsigned short int
 #define ParseTOKENTYPE SToken
 typedef union {
   int yyinit;
   ParseTOKENTYPE yy0;
-  EOrder yy14;
-  ENullOrder yy17;
-  SNodeList* yy60;
-  SToken yy105;
-  int32_t yy140;
-  SNode* yy172;
-  EFillMode yy202;
-  SDataType yy248;
-  EOperatorType yy572;
-  int64_t yy593;
-  SAlterOption yy609;
-  bool yy617;
-  EJoinType yy636;
+  EFillMode yy18;
+  SAlterOption yy25;
+  SToken yy53;
+  EOperatorType yy136;
+  int32_t yy158;
+  ENullOrder yy185;
+  SNodeList* yy236;
+  EJoinType yy342;
+  EOrder yy430;
+  int64_t yy435;
+  SDataType yy450;
+  bool yy603;
+  SNode* yy636;
 } YYMINORTYPE;
 #ifndef YYSTACKDEPTH
 #define YYSTACKDEPTH 100
@@ -134,17 +134,17 @@ typedef union {
 #define ParseCTX_FETCH
 #define ParseCTX_STORE
 #define YYFALLBACK 1
-#define YYNSTATE 605
-#define YYNRULE 452
-#define YYNTOKEN 238
-#define YY_MAX_SHIFT 604
-#define YY_MIN_SHIFTREDUCE 893
-#define YY_MAX_SHIFTREDUCE 1344
-#define YY_ERROR_ACTION 1345
-#define YY_ACCEPT_ACTION 1346
-#define YY_NO_ACTION 1347
-#define YY_MIN_REDUCE 1348
-#define YY_MAX_REDUCE 1799
+#define YYNSTATE 611
+#define
YYNRULE 455 +#define YYNTOKEN 240 +#define YY_MAX_SHIFT 610 +#define YY_MIN_SHIFTREDUCE 901 +#define YY_MAX_SHIFTREDUCE 1355 +#define YY_ERROR_ACTION 1356 +#define YY_ACCEPT_ACTION 1357 +#define YY_NO_ACTION 1358 +#define YY_MIN_REDUCE 1359 +#define YY_MAX_REDUCE 1813 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -211,601 +211,604 @@ typedef union { ** yy_default[] Default action for each state. ** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (2154) +#define YY_ACTTAB_COUNT (2153) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 1467, 1777, 1777, 1646, 383, 1634, 384, 1380, 292, 11, - /* 10 */ 10, 343, 35, 33, 1776, 146, 24, 923, 1774, 1774, - /* 20 */ 301, 391, 1159, 384, 1380, 1631, 36, 34, 32, 31, - /* 30 */ 30, 1662, 26, 36, 34, 32, 31, 30, 518, 503, - /* 40 */ 1627, 1633, 36, 34, 32, 31, 30, 1157, 1346, 502, - /* 50 */ 1777, 522, 130, 1617, 1360, 927, 928, 518, 14, 483, - /* 60 */ 35, 33, 1285, 145, 1165, 28, 223, 1774, 301, 1675, - /* 70 */ 1159, 349, 80, 1647, 505, 1649, 1650, 501, 77, 522, - /* 80 */ 1, 62, 1715, 1777, 1181, 519, 273, 1711, 518, 1261, - /* 90 */ 1634, 113, 398, 309, 108, 1157, 1775, 104, 1777, 1470, - /* 100 */ 1774, 1646, 601, 1473, 419, 271, 14, 317, 35, 33, - /* 110 */ 1631, 147, 1165, 1158, 1478, 1774, 301, 38, 1159, 36, - /* 120 */ 34, 32, 31, 30, 388, 1627, 1633, 56, 2, 1662, - /* 130 */ 1181, 36, 34, 32, 31, 30, 522, 503, 36, 34, - /* 140 */ 32, 31, 30, 1157, 55, 1523, 1777, 502, 39, 131, - /* 150 */ 601, 1617, 291, 1435, 14, 1371, 1160, 1521, 1662, 145, - /* 160 */ 1165, 1158, 559, 1774, 1450, 274, 472, 1675, 140, 1341, - /* 170 */ 132, 1647, 505, 1649, 1650, 501, 2, 522, 1163, 1164, - /* 180 */ 1517, 1209, 1210, 1212, 1213, 1214, 1215, 1216, 498, 520, - /* 190 */ 1224, 1225, 1226, 1227, 1228, 1229, 1247, 1410, 601, 519, - /* 200 */ 1299, 471, 1183, 55, 1160, 1617, 1456, 1196, 148, 1158, - /* 210 */ 473, 347, 447, 94, 484, 1791, 93, 92, 91, 90, - /* 220 */ 89, 88, 87, 86, 85, 1729, 1163, 1164, 1478, 1209, - /* 230 */ 1210, 1212, 1213, 1214, 1215, 1216, 498, 520, 1224, 1225, - /* 240 */ 1226, 1227, 1228, 1229, 1454, 148, 1248, 479, 1523, 1726, - /* 250 */ 1340, 1777, 1160, 597, 596, 306, 1309, 433, 432, 55, - /* 260 */ 1521, 1523, 431, 398, 145, 109, 428, 1253, 1774, 427, - /* 270 */ 426, 425, 148, 1522, 1163, 1164, 112, 1209, 1210, 1212, - /* 280 */ 1213, 1214, 1215, 1216, 498, 520, 1224, 1225, 1226, 1227, - /* 290 */ 1228, 1229, 35, 33, 1349, 465, 1307, 1308, 1310, 1311, - /* 300 */ 301, 556, 1159, 27, 299, 1242, 1243, 1244, 1245, 1246, - /* 310 */ 1250, 1251, 1252, 110, 939, 94, 62, 1469, 93, 92, - /* 320 */ 91, 90, 89, 88, 87, 86, 85, 1157, 143, 1722, - /* 330 */ 1723, 148, 1727, 519, 1184, 519, 479, 1631, 1474, 417, - /* 340 */ 35, 33, 1230, 506, 1165, 348, 304, 104, 301, 1568, - /* 350 */ 1159, 55, 1627, 1633, 424, 36, 34, 32, 31, 30, - /* 360 */ 8, 479, 1478, 522, 1478, 112, 36, 34, 32, 31, - /* 370 */ 30, 1556, 433, 432, 1635, 1157, 154, 431, 156, 1646, - /* 380 */ 109, 428, 601, 519, 427, 426, 425, 148, 35, 33, - /* 390 */ 112, 556, 1165, 1158, 1631, 358, 301, 305, 1159, 1523, - /* 400 */ 60, 274, 110, 59, 1182, 128, 312, 1662, 9, 1627, - /* 410 */ 1633, 1521, 1478, 1185, 1480, 503, 481, 142, 1722, 1723, - /* 420 */ 522, 1727, 468, 1157, 1196, 502, 342, 110, 341, 1617, - /* 430 */ 601, 313, 1247, 64, 289, 1397, 1160, 188, 1559, 1561, - /* 440 */ 1165, 1158, 144, 1722, 1723, 
1675, 1727, 548, 263, 1647, - /* 450 */ 505, 1649, 1650, 501, 1370, 522, 9, 434, 1163, 1164, - /* 460 */ 334, 1209, 1210, 1212, 1213, 1214, 1215, 1216, 498, 520, - /* 470 */ 1224, 1225, 1226, 1227, 1228, 1229, 283, 311, 601, 148, - /* 480 */ 336, 332, 1248, 1120, 1160, 128, 475, 455, 148, 1158, - /* 490 */ 373, 1122, 474, 469, 1480, 1463, 519, 36, 34, 32, - /* 500 */ 31, 30, 1369, 1253, 1617, 1465, 1163, 1164, 359, 1209, - /* 510 */ 1210, 1212, 1213, 1214, 1215, 1216, 498, 520, 1224, 1225, - /* 520 */ 1226, 1227, 1228, 1229, 284, 1478, 282, 281, 1368, 421, - /* 530 */ 558, 1292, 1160, 423, 158, 157, 456, 1183, 214, 27, - /* 540 */ 299, 1242, 1243, 1244, 1245, 1246, 1250, 1251, 1252, 1646, - /* 550 */ 382, 1121, 1617, 386, 1163, 1164, 422, 1209, 1210, 1212, - /* 560 */ 1213, 1214, 1215, 1216, 498, 520, 1224, 1225, 1226, 1227, - /* 570 */ 1228, 1229, 35, 33, 270, 1777, 1181, 1662, 1617, 519, - /* 580 */ 301, 314, 1159, 366, 1249, 482, 378, 1461, 145, 128, - /* 590 */ 554, 397, 1774, 390, 423, 502, 386, 1235, 1480, 1617, - /* 600 */ 940, 54, 939, 1183, 379, 1254, 70, 1157, 1478, 553, - /* 610 */ 552, 1367, 551, 550, 549, 1675, 1366, 422, 81, 1647, - /* 620 */ 505, 1649, 1650, 501, 1165, 522, 1365, 1471, 1715, 941, - /* 630 */ 127, 1186, 294, 1711, 141, 1407, 32, 31, 30, 191, - /* 640 */ 2, 25, 1029, 545, 544, 543, 1033, 542, 1035, 1036, - /* 650 */ 541, 1038, 538, 1743, 1044, 535, 1046, 1047, 532, 529, - /* 660 */ 497, 1617, 601, 1364, 1363, 1362, 1617, 1359, 1358, 1357, - /* 670 */ 1356, 1355, 1354, 1158, 377, 1353, 1617, 372, 371, 370, - /* 680 */ 369, 368, 365, 364, 363, 362, 361, 357, 356, 355, - /* 690 */ 354, 353, 352, 351, 350, 577, 576, 575, 316, 1211, - /* 700 */ 574, 573, 572, 114, 567, 566, 565, 564, 563, 562, - /* 710 */ 561, 560, 121, 1617, 1617, 1617, 1160, 1617, 1617, 1617, - /* 720 */ 1617, 1617, 1617, 1352, 7, 1617, 128, 1351, 430, 429, - /* 730 */ 1646, 571, 569, 1560, 1561, 1481, 927, 928, 1163, 1164, - /* 740 */ 1729, 1209, 1210, 1212, 1213, 1214, 1215, 1216, 498, 520, - /* 750 */ 1224, 1225, 1226, 1227, 1228, 1229, 199, 129, 1662, 519, - /* 760 */ 1284, 1646, 252, 1729, 1725, 1211, 503, 991, 1159, 519, - /* 770 */ 1183, 1475, 1606, 1617, 250, 53, 502, 1617, 52, 506, - /* 780 */ 1617, 1597, 1734, 1280, 993, 1569, 483, 1724, 1478, 1662, - /* 790 */ 1144, 1145, 487, 1157, 479, 159, 1675, 482, 1478, 80, - /* 800 */ 1647, 505, 1649, 1650, 501, 246, 522, 502, 1508, 1715, - /* 810 */ 1165, 1617, 519, 273, 1711, 570, 454, 324, 179, 55, - /* 820 */ 1168, 177, 485, 112, 516, 1777, 181, 1675, 490, 180, - /* 830 */ 81, 1647, 505, 1649, 1650, 501, 1637, 522, 145, 519, - /* 840 */ 1715, 1478, 1774, 483, 294, 1711, 141, 183, 601, 185, - /* 850 */ 182, 517, 184, 445, 495, 337, 79, 1646, 215, 1158, - /* 860 */ 110, 519, 519, 547, 461, 1742, 443, 964, 1478, 47, - /* 870 */ 272, 118, 1639, 236, 315, 212, 1722, 478, 1392, 477, - /* 880 */ 11, 10, 1777, 1390, 965, 1662, 1171, 58, 57, 346, - /* 890 */ 1478, 1478, 153, 503, 1280, 147, 1361, 340, 46, 1774, - /* 900 */ 436, 1167, 1160, 502, 202, 439, 37, 1617, 1436, 269, - /* 910 */ 37, 457, 330, 37, 326, 322, 150, 225, 1343, 1344, - /* 920 */ 1646, 1455, 116, 1675, 1163, 1164, 81, 1647, 505, 1649, - /* 930 */ 1650, 501, 1211, 522, 218, 117, 1715, 466, 1306, 76, - /* 940 */ 294, 1711, 1790, 448, 204, 118, 1255, 148, 1662, 72, - /* 950 */ 1217, 1749, 1663, 1115, 1381, 46, 503, 227, 527, 209, - /* 960 */ 416, 174, 511, 480, 1518, 1283, 502, 1170, 1745, 217, - /* 970 */ 1617, 117, 1646, 139, 1239, 233, 220, 488, 222, 415, - /* 980 */ 411, 407, 403, 
173, 118, 1022, 1675, 119, 117, 81, - /* 990 */ 1647, 505, 1649, 1650, 501, 245, 522, 1453, 1050, 1715, - /* 1000 */ 1662, 3, 1181, 294, 1711, 1790, 319, 63, 503, 323, - /* 1010 */ 171, 1054, 991, 554, 1772, 491, 280, 279, 502, 241, - /* 1020 */ 1128, 155, 1617, 360, 1061, 1558, 367, 1059, 120, 375, - /* 1030 */ 374, 1646, 553, 552, 380, 551, 550, 549, 1675, 1187, - /* 1040 */ 376, 81, 1647, 505, 1649, 1650, 501, 438, 522, 381, - /* 1050 */ 389, 1715, 1190, 162, 392, 294, 1711, 1790, 393, 1662, - /* 1060 */ 1189, 164, 446, 394, 166, 395, 1733, 503, 170, 1188, - /* 1070 */ 165, 396, 167, 399, 169, 61, 187, 502, 418, 172, - /* 1080 */ 1165, 1617, 1646, 420, 1468, 176, 1464, 483, 441, 554, - /* 1090 */ 84, 242, 163, 435, 288, 1601, 178, 1675, 186, 1646, - /* 1100 */ 259, 1647, 505, 1649, 1650, 501, 122, 522, 553, 552, - /* 1110 */ 1662, 551, 550, 549, 123, 1466, 1462, 124, 503, 125, - /* 1120 */ 449, 189, 51, 453, 450, 50, 1777, 1662, 502, 243, - /* 1130 */ 458, 192, 1617, 194, 1186, 503, 197, 459, 483, 147, - /* 1140 */ 1746, 467, 509, 1774, 1756, 502, 6, 200, 1675, 1617, - /* 1150 */ 463, 259, 1647, 505, 1649, 1650, 501, 1646, 522, 464, - /* 1160 */ 476, 5, 1736, 203, 1755, 1675, 293, 210, 82, 1647, - /* 1170 */ 505, 1649, 1650, 501, 1280, 522, 111, 1777, 1715, 470, - /* 1180 */ 208, 1185, 1714, 1711, 1348, 1662, 40, 211, 492, 1646, - /* 1190 */ 145, 1773, 135, 503, 1774, 1730, 295, 489, 18, 1567, - /* 1200 */ 1793, 507, 508, 502, 1566, 512, 303, 1617, 103, 102, - /* 1210 */ 101, 100, 99, 98, 97, 96, 95, 1662, 216, 479, - /* 1220 */ 1696, 513, 229, 1675, 219, 500, 82, 1647, 505, 1649, - /* 1230 */ 1650, 501, 514, 522, 231, 502, 1715, 69, 486, 1617, - /* 1240 */ 494, 1711, 1646, 493, 221, 244, 1479, 71, 112, 525, - /* 1250 */ 1451, 247, 600, 238, 48, 1675, 134, 253, 267, 1647, - /* 1260 */ 505, 1649, 1650, 501, 499, 522, 496, 1687, 483, 290, - /* 1270 */ 1662, 260, 249, 254, 251, 1611, 1610, 318, 503, 1607, - /* 1280 */ 320, 321, 1153, 1154, 151, 110, 325, 1605, 502, 327, - /* 1290 */ 328, 329, 1617, 1604, 331, 1603, 333, 1602, 335, 1646, - /* 1300 */ 212, 1722, 478, 1587, 477, 152, 339, 1777, 1675, 338, - /* 1310 */ 1131, 82, 1647, 505, 1649, 1650, 501, 1581, 522, 1130, - /* 1320 */ 145, 1715, 1580, 344, 1774, 345, 1712, 1662, 604, 1579, - /* 1330 */ 1578, 1551, 1098, 1550, 1549, 503, 1548, 1547, 1546, 1545, - /* 1340 */ 1544, 1543, 240, 1100, 115, 502, 1646, 1542, 1541, 1617, - /* 1350 */ 1540, 1539, 462, 1538, 105, 1537, 1536, 1535, 1534, 1533, - /* 1360 */ 593, 589, 585, 581, 239, 1675, 1532, 1531, 268, 1647, - /* 1370 */ 505, 1649, 1650, 501, 1662, 522, 1530, 1529, 1528, 1527, - /* 1380 */ 1526, 1525, 503, 1524, 1409, 1646, 1377, 138, 78, 160, - /* 1390 */ 1376, 234, 502, 1595, 1589, 1573, 1617, 106, 385, 930, - /* 1400 */ 161, 929, 107, 1564, 1457, 387, 168, 1408, 1406, 401, - /* 1410 */ 400, 958, 1675, 1662, 1404, 132, 1647, 505, 1649, 1650, - /* 1420 */ 501, 503, 522, 1402, 1400, 515, 402, 1389, 406, 404, - /* 1430 */ 1388, 502, 175, 405, 410, 1617, 1646, 414, 298, 408, - /* 1440 */ 1375, 409, 1459, 1458, 413, 412, 1398, 1064, 1065, 990, - /* 1450 */ 460, 1675, 989, 195, 268, 1647, 505, 1649, 1650, 501, - /* 1460 */ 1792, 522, 45, 988, 1662, 1646, 987, 568, 1393, 570, - /* 1470 */ 285, 1136, 500, 190, 286, 1391, 984, 287, 983, 1374, - /* 1480 */ 982, 440, 502, 437, 442, 1373, 1617, 444, 1594, 83, - /* 1490 */ 1588, 1138, 451, 1662, 1572, 1571, 126, 1646, 1563, 4, - /* 1500 */ 65, 503, 1675, 196, 37, 267, 1647, 505, 1649, 1650, - /* 1510 */ 501, 502, 522, 49, 1688, 1617, 452, 193, 
300, 15, - /* 1520 */ 201, 43, 1305, 206, 41, 1662, 1298, 133, 207, 205, - /* 1530 */ 22, 1675, 23, 503, 268, 1647, 505, 1649, 1650, 501, - /* 1540 */ 1637, 522, 1277, 502, 66, 213, 198, 1617, 1276, 42, - /* 1550 */ 302, 136, 1334, 1646, 16, 17, 13, 1323, 1329, 10, - /* 1560 */ 1328, 19, 296, 1675, 1333, 1332, 268, 1647, 505, 1649, - /* 1570 */ 1650, 501, 297, 522, 1219, 137, 149, 29, 1204, 510, - /* 1580 */ 1218, 1662, 12, 20, 1646, 21, 226, 1240, 1562, 503, - /* 1590 */ 504, 224, 230, 232, 72, 1636, 235, 1303, 1175, 502, - /* 1600 */ 1221, 228, 67, 1617, 68, 526, 1678, 521, 44, 310, - /* 1610 */ 1051, 1048, 1662, 1646, 524, 528, 530, 531, 533, 1675, - /* 1620 */ 503, 1045, 255, 1647, 505, 1649, 1650, 501, 534, 522, - /* 1630 */ 502, 1039, 536, 537, 1617, 1037, 539, 1043, 1042, 540, - /* 1640 */ 1041, 1662, 1040, 1028, 73, 74, 1060, 75, 1057, 503, - /* 1650 */ 1675, 1056, 546, 262, 1647, 505, 1649, 1650, 501, 502, - /* 1660 */ 522, 956, 1646, 1617, 555, 557, 237, 997, 308, 307, - /* 1670 */ 978, 977, 973, 1646, 1058, 976, 994, 975, 1173, 1675, - /* 1680 */ 974, 972, 264, 1647, 505, 1649, 1650, 501, 971, 522, - /* 1690 */ 1662, 992, 968, 967, 966, 963, 962, 1405, 503, 961, - /* 1700 */ 578, 1662, 579, 1166, 580, 1403, 582, 583, 502, 503, - /* 1710 */ 584, 1401, 1617, 586, 587, 588, 1399, 590, 592, 502, - /* 1720 */ 1165, 591, 1387, 1617, 1646, 1386, 594, 1372, 1675, 595, - /* 1730 */ 598, 256, 1647, 505, 1649, 1650, 501, 1161, 522, 1675, - /* 1740 */ 599, 603, 265, 1647, 505, 1649, 1650, 501, 248, 522, - /* 1750 */ 602, 1347, 1662, 1646, 1347, 1347, 1347, 1347, 523, 1347, - /* 1760 */ 503, 1347, 1347, 1347, 1347, 1347, 1347, 1347, 1347, 1169, - /* 1770 */ 502, 1347, 1347, 1347, 1617, 1347, 1347, 1347, 1347, 1347, - /* 1780 */ 1347, 1662, 1347, 1347, 1347, 1646, 1347, 1347, 1347, 503, - /* 1790 */ 1675, 1347, 1347, 257, 1647, 505, 1649, 1650, 501, 502, - /* 1800 */ 522, 1347, 1347, 1617, 1347, 1347, 1347, 1347, 1347, 1347, - /* 1810 */ 1347, 1347, 1174, 1662, 1347, 1347, 1347, 1347, 1347, 1675, - /* 1820 */ 1347, 503, 266, 1647, 505, 1649, 1650, 501, 1347, 522, - /* 1830 */ 1347, 502, 1347, 1347, 1177, 1617, 1347, 1347, 1347, 1347, - /* 1840 */ 1347, 1646, 1347, 1347, 1347, 520, 1224, 1225, 1347, 1347, - /* 1850 */ 1347, 1675, 1347, 1347, 258, 1647, 505, 1649, 1650, 501, - /* 1860 */ 1347, 522, 1347, 1347, 1347, 1347, 1347, 1347, 1347, 1662, - /* 1870 */ 1347, 1347, 1646, 1347, 1347, 1347, 1347, 503, 1347, 1347, - /* 1880 */ 1347, 1347, 1347, 1347, 1347, 1347, 1347, 502, 1347, 1347, - /* 1890 */ 1347, 1617, 1347, 1347, 1347, 1347, 1347, 1347, 1347, 1347, - /* 1900 */ 1662, 1646, 1347, 1347, 1347, 1347, 1347, 1675, 503, 1347, - /* 1910 */ 1658, 1647, 505, 1649, 1650, 501, 1347, 522, 502, 1347, - /* 1920 */ 1347, 1347, 1617, 1347, 1347, 1347, 1347, 1347, 1347, 1662, - /* 1930 */ 1347, 1347, 1646, 1347, 1347, 1347, 1347, 503, 1675, 1347, - /* 1940 */ 1347, 1657, 1647, 505, 1649, 1650, 501, 502, 522, 1347, - /* 1950 */ 1347, 1617, 1347, 1347, 1347, 1347, 1347, 1347, 1347, 1347, - /* 1960 */ 1662, 1646, 1347, 1347, 1347, 1347, 1347, 1675, 503, 1347, - /* 1970 */ 1656, 1647, 505, 1649, 1650, 501, 1347, 522, 502, 1347, - /* 1980 */ 1347, 1347, 1617, 1347, 1347, 1347, 1347, 1347, 1347, 1662, - /* 1990 */ 1347, 1347, 1347, 1347, 1347, 1347, 1347, 503, 1675, 1347, - /* 2000 */ 1347, 277, 1647, 505, 1649, 1650, 501, 502, 522, 1347, - /* 2010 */ 1347, 1617, 1646, 1347, 1347, 1347, 1347, 1347, 1347, 1347, - /* 2020 */ 1347, 1347, 1347, 1646, 1347, 1347, 1347, 1675, 1347, 1347, - /* 2030 */ 276, 1647, 
505, 1649, 1650, 501, 1347, 522, 1347, 1347, - /* 2040 */ 1662, 1347, 1347, 1347, 1347, 1347, 1347, 1347, 503, 1347, - /* 2050 */ 1347, 1662, 1347, 1347, 1347, 1347, 1347, 1347, 502, 503, - /* 2060 */ 1347, 1347, 1617, 1347, 1347, 1347, 1347, 1347, 1347, 502, - /* 2070 */ 1347, 1347, 1347, 1617, 1347, 1347, 1347, 1646, 1675, 1347, - /* 2080 */ 1347, 278, 1647, 505, 1649, 1650, 501, 1347, 522, 1675, - /* 2090 */ 1347, 1347, 275, 1647, 505, 1649, 1650, 501, 1347, 522, - /* 2100 */ 1347, 1347, 1347, 1347, 1347, 1662, 1347, 1347, 1347, 1347, - /* 2110 */ 1347, 1347, 1347, 503, 1347, 1347, 1347, 1347, 1347, 1347, - /* 2120 */ 1347, 1347, 1347, 502, 1347, 1347, 1347, 1617, 1347, 1347, - /* 2130 */ 1347, 1347, 1347, 1347, 1347, 1347, 1347, 1347, 1347, 1347, - /* 2140 */ 1347, 1347, 1347, 1675, 1347, 1347, 261, 1647, 505, 1649, - /* 2150 */ 1650, 501, 1347, 522, + /* 0 */ 386, 1647, 387, 1391, 295, 394, 524, 387, 1391, 28, + /* 10 */ 226, 931, 35, 33, 130, 1676, 1371, 1660, 104, 1791, + /* 20 */ 304, 1644, 1169, 477, 523, 424, 36, 34, 32, 31, + /* 30 */ 30, 385, 1790, 62, 389, 1490, 1788, 1640, 1646, 36, + /* 40 */ 34, 32, 31, 30, 1535, 1676, 108, 1167, 527, 935, + /* 50 */ 936, 294, 1000, 508, 524, 1485, 1533, 154, 14, 476, + /* 60 */ 35, 33, 1296, 507, 1175, 24, 350, 1630, 304, 1002, + /* 70 */ 1169, 1418, 277, 488, 523, 36, 34, 32, 31, 30, + /* 80 */ 56, 1, 60, 1490, 1689, 59, 524, 80, 1661, 510, + /* 90 */ 1663, 1664, 506, 1359, 527, 1167, 1207, 1729, 104, 603, + /* 100 */ 602, 276, 1725, 607, 1258, 429, 14, 36, 34, 32, + /* 110 */ 31, 30, 1175, 1791, 1168, 1490, 140, 103, 102, 101, + /* 120 */ 100, 99, 98, 97, 96, 95, 147, 376, 1529, 2, + /* 130 */ 1788, 583, 582, 581, 319, 39, 580, 579, 578, 114, + /* 140 */ 573, 572, 571, 570, 569, 568, 567, 566, 121, 562, + /* 150 */ 511, 607, 1568, 307, 1259, 55, 1580, 55, 1170, 156, + /* 160 */ 94, 1791, 1168, 93, 92, 91, 90, 89, 88, 87, + /* 170 */ 86, 85, 158, 157, 146, 352, 1264, 1352, 1788, 393, + /* 180 */ 1173, 1174, 389, 1220, 1221, 1223, 1224, 1225, 1226, 1227, + /* 190 */ 503, 525, 1235, 1236, 1237, 1238, 1239, 1240, 1468, 36, + /* 200 */ 34, 32, 31, 30, 64, 292, 1170, 131, 191, 274, + /* 210 */ 148, 1447, 27, 302, 1253, 1254, 1255, 1256, 1257, 1261, + /* 220 */ 1262, 1263, 1421, 36, 34, 32, 31, 30, 1173, 1174, + /* 230 */ 484, 1220, 1221, 1223, 1224, 1225, 1226, 1227, 503, 525, + /* 240 */ 1235, 1236, 1237, 1238, 1239, 1240, 35, 33, 1467, 948, + /* 250 */ 70, 947, 438, 437, 304, 403, 1169, 436, 1351, 112, + /* 260 */ 109, 433, 308, 1791, 432, 431, 430, 35, 33, 1310, + /* 270 */ 128, 1483, 1660, 403, 523, 304, 1789, 1169, 949, 1492, + /* 280 */ 1788, 1167, 438, 437, 148, 1193, 148, 436, 62, 972, + /* 290 */ 109, 433, 14, 1207, 432, 431, 430, 110, 1175, 1360, + /* 300 */ 1676, 1303, 1167, 1382, 1660, 524, 973, 1193, 508, 524, + /* 310 */ 1486, 486, 142, 1736, 1737, 2, 1741, 351, 507, 1175, + /* 320 */ 94, 361, 1630, 93, 92, 91, 90, 89, 88, 87, + /* 330 */ 86, 85, 1676, 1381, 1490, 38, 8, 607, 1490, 1689, + /* 340 */ 487, 559, 82, 1661, 510, 1663, 1664, 506, 1168, 527, + /* 350 */ 507, 1191, 1729, 1630, 1630, 1535, 1728, 1725, 607, 128, + /* 360 */ 558, 557, 309, 556, 555, 554, 1380, 1533, 1493, 1168, + /* 370 */ 565, 1689, 1462, 1535, 81, 1661, 510, 1663, 1664, 506, + /* 380 */ 315, 527, 524, 1630, 1729, 1533, 1743, 26, 297, 1725, + /* 390 */ 141, 478, 1170, 54, 362, 435, 434, 36, 34, 32, + /* 400 */ 31, 30, 218, 36, 34, 32, 31, 30, 466, 1756, + /* 410 */ 1740, 1490, 55, 1170, 1173, 1174, 1630, 1220, 1221, 1223, + /* 420 */ 1224, 1225, 1226, 
1227, 503, 525, 1235, 1236, 1237, 1238, + /* 430 */ 1239, 1240, 460, 577, 575, 1173, 1174, 1379, 1220, 1221, + /* 440 */ 1223, 1224, 1225, 1226, 1227, 503, 525, 1235, 1236, 1237, + /* 450 */ 1238, 1239, 1240, 35, 33, 1241, 1378, 443, 1195, 610, + /* 460 */ 316, 304, 1377, 1169, 148, 148, 249, 1571, 1573, 1520, + /* 470 */ 1246, 1222, 451, 243, 35, 33, 1193, 1481, 1376, 1660, + /* 480 */ 1647, 461, 304, 312, 1169, 105, 190, 1630, 1167, 524, + /* 490 */ 473, 599, 595, 591, 587, 242, 391, 1644, 446, 1357, + /* 500 */ 1644, 402, 1191, 440, 561, 1175, 1630, 1676, 189, 1167, + /* 510 */ 337, 484, 1630, 1640, 1646, 508, 1640, 1646, 1490, 484, + /* 520 */ 78, 1791, 9, 237, 527, 507, 1175, 527, 1630, 1630, + /* 530 */ 339, 335, 564, 51, 145, 488, 50, 127, 1788, 511, + /* 540 */ 112, 148, 576, 9, 607, 1581, 1689, 1194, 112, 80, + /* 550 */ 1661, 510, 1663, 1664, 506, 1168, 527, 520, 320, 1729, + /* 560 */ 1375, 479, 474, 276, 1725, 607, 36, 34, 32, 31, + /* 570 */ 30, 1648, 1130, 314, 1479, 1791, 1168, 428, 110, 553, + /* 580 */ 1132, 128, 465, 340, 217, 198, 110, 1465, 145, 55, + /* 590 */ 1492, 1644, 1788, 143, 1736, 1737, 77, 1741, 1791, 1170, + /* 600 */ 427, 144, 1736, 1737, 1146, 1741, 193, 1640, 1646, 113, + /* 610 */ 1630, 145, 277, 1572, 1573, 1788, 490, 1482, 527, 1295, + /* 620 */ 1170, 1173, 1174, 1374, 1220, 1221, 1223, 1224, 1225, 1226, + /* 630 */ 1227, 503, 525, 1235, 1236, 1237, 1238, 1239, 1240, 286, + /* 640 */ 1222, 1131, 1173, 1174, 1258, 1220, 1221, 1223, 1224, 1225, + /* 650 */ 1226, 1227, 503, 525, 1235, 1236, 1237, 1238, 1239, 1240, + /* 660 */ 35, 33, 273, 559, 1191, 345, 1320, 344, 304, 524, + /* 670 */ 1169, 369, 524, 1630, 381, 32, 31, 30, 1748, 1291, + /* 680 */ 559, 1487, 558, 557, 1610, 556, 555, 554, 287, 7, + /* 690 */ 285, 284, 382, 426, 1259, 1167, 947, 428, 1490, 558, + /* 700 */ 557, 1490, 556, 555, 554, 470, 1318, 1319, 1321, 1322, + /* 710 */ 1535, 317, 1175, 11, 10, 1373, 1264, 1743, 148, 128, + /* 720 */ 427, 422, 1534, 935, 936, 1743, 1154, 1155, 1492, 2, + /* 730 */ 1038, 550, 549, 548, 1042, 547, 1044, 1045, 546, 1047, + /* 740 */ 543, 1739, 1053, 540, 1055, 1056, 537, 534, 346, 1738, + /* 750 */ 1370, 607, 27, 302, 1253, 1254, 1255, 1256, 1257, 1261, + /* 760 */ 1262, 1263, 1168, 380, 1466, 1630, 375, 374, 373, 372, + /* 770 */ 371, 368, 367, 366, 365, 364, 360, 359, 358, 357, + /* 780 */ 356, 355, 354, 353, 524, 524, 129, 524, 1791, 1196, + /* 790 */ 492, 255, 1192, 1193, 1272, 1260, 521, 522, 1475, 239, + /* 800 */ 1630, 145, 1369, 253, 53, 1788, 1170, 52, 1368, 1367, + /* 810 */ 1366, 452, 1365, 1490, 1490, 1364, 1490, 1265, 524, 1363, + /* 820 */ 1660, 561, 1362, 47, 159, 275, 1294, 1477, 1173, 1174, + /* 830 */ 318, 1220, 1221, 1223, 1224, 1225, 1226, 1227, 503, 525, + /* 840 */ 1235, 1236, 1237, 1238, 1239, 1240, 495, 1490, 1676, 55, + /* 850 */ 1473, 1791, 1630, 25, 1619, 194, 487, 1408, 1630, 1630, + /* 860 */ 1630, 1291, 1630, 1403, 145, 1630, 507, 1401, 1788, 1630, + /* 870 */ 1630, 182, 1630, 184, 180, 186, 183, 188, 185, 439, + /* 880 */ 187, 1660, 500, 450, 502, 441, 79, 1689, 76, 444, + /* 890 */ 81, 1661, 510, 1663, 1664, 506, 448, 527, 72, 327, + /* 900 */ 1729, 11, 10, 552, 297, 1725, 141, 1372, 459, 1676, + /* 910 */ 1354, 1355, 1650, 1448, 1660, 202, 1178, 508, 58, 57, + /* 920 */ 349, 118, 46, 153, 471, 1757, 1177, 507, 343, 205, + /* 930 */ 221, 1630, 37, 37, 37, 453, 212, 1677, 1392, 228, + /* 940 */ 272, 421, 1676, 333, 1530, 329, 325, 150, 1689, 1652, + /* 950 */ 508, 81, 1661, 510, 1663, 1664, 506, 1222, 527, 1759, + /* 960 */ 
507, 1729, 462, 1317, 1630, 297, 1725, 1804, 1191, 116, + /* 970 */ 207, 117, 485, 1266, 1228, 1124, 1763, 493, 148, 1660, + /* 980 */ 230, 1689, 220, 1181, 81, 1661, 510, 1663, 1664, 506, + /* 990 */ 223, 527, 118, 1180, 1729, 1660, 46, 532, 297, 1725, + /* 1000 */ 1804, 322, 117, 225, 1250, 3, 118, 1676, 326, 1786, + /* 1010 */ 516, 282, 236, 1000, 283, 508, 119, 117, 244, 155, + /* 1020 */ 1138, 363, 370, 1676, 1570, 507, 378, 1660, 377, 1630, + /* 1030 */ 379, 508, 383, 1031, 1197, 496, 384, 248, 1059, 392, + /* 1040 */ 1200, 507, 395, 1063, 162, 1630, 1689, 1070, 396, 82, + /* 1050 */ 1661, 510, 1663, 1664, 506, 1676, 527, 1068, 120, 1729, + /* 1060 */ 1199, 164, 1689, 508, 1726, 81, 1661, 510, 1663, 1664, + /* 1070 */ 506, 1201, 527, 507, 397, 1729, 398, 1630, 1660, 297, + /* 1080 */ 1725, 1804, 167, 488, 399, 169, 1198, 400, 401, 172, + /* 1090 */ 1747, 61, 404, 1660, 1689, 175, 423, 262, 1661, 510, + /* 1100 */ 1663, 1664, 506, 425, 527, 84, 1676, 1175, 1480, 179, + /* 1110 */ 1476, 291, 181, 1614, 508, 122, 123, 1478, 1474, 124, + /* 1120 */ 125, 1676, 245, 1791, 507, 192, 455, 195, 1630, 508, + /* 1130 */ 246, 197, 454, 464, 488, 463, 147, 200, 1196, 507, + /* 1140 */ 1788, 458, 472, 1630, 1660, 1689, 1770, 203, 262, 1661, + /* 1150 */ 510, 1663, 1664, 506, 514, 527, 6, 1750, 469, 1769, + /* 1160 */ 1689, 211, 481, 82, 1661, 510, 1663, 1664, 506, 206, + /* 1170 */ 527, 1760, 1676, 1729, 1791, 296, 475, 499, 1725, 1195, + /* 1180 */ 505, 468, 5, 1291, 111, 40, 497, 145, 1744, 1807, + /* 1190 */ 507, 1788, 298, 18, 1630, 512, 1660, 513, 494, 306, + /* 1200 */ 311, 310, 1579, 135, 1578, 1660, 214, 517, 518, 519, + /* 1210 */ 1183, 1689, 213, 1787, 270, 1661, 510, 1663, 1664, 506, + /* 1220 */ 504, 527, 501, 1701, 1676, 219, 232, 71, 491, 1710, + /* 1230 */ 234, 247, 508, 1676, 69, 1176, 250, 1491, 241, 222, + /* 1240 */ 606, 508, 507, 1463, 498, 48, 1630, 530, 224, 256, + /* 1250 */ 134, 507, 1175, 1660, 263, 1630, 257, 293, 467, 252, + /* 1260 */ 254, 1624, 1623, 1689, 321, 1620, 132, 1661, 510, 1663, + /* 1270 */ 1664, 506, 1689, 527, 323, 271, 1661, 510, 1663, 1664, + /* 1280 */ 506, 1676, 527, 324, 1163, 1660, 1164, 151, 1618, 508, + /* 1290 */ 328, 528, 330, 331, 1617, 332, 334, 1616, 336, 507, + /* 1300 */ 1615, 338, 1179, 1630, 1600, 152, 341, 1141, 342, 1140, + /* 1310 */ 489, 1805, 1594, 1676, 1593, 347, 348, 1660, 1592, 1591, + /* 1320 */ 1689, 508, 1107, 266, 1661, 510, 1663, 1664, 506, 1563, + /* 1330 */ 527, 507, 1562, 1561, 1560, 1630, 1559, 1558, 1557, 1556, + /* 1340 */ 1555, 1554, 1553, 1552, 1551, 1676, 1184, 1550, 1549, 1548, + /* 1350 */ 1547, 1546, 1689, 508, 1545, 132, 1661, 510, 1663, 1664, + /* 1360 */ 506, 480, 527, 507, 115, 1660, 1544, 1630, 1187, 1543, + /* 1370 */ 301, 1542, 1541, 1540, 1109, 1539, 1538, 1537, 1660, 525, + /* 1380 */ 1235, 1236, 1536, 1420, 1689, 1388, 160, 271, 1661, 510, + /* 1390 */ 1663, 1664, 506, 1676, 527, 938, 106, 138, 937, 388, + /* 1400 */ 1806, 505, 1387, 161, 390, 107, 1676, 1608, 1602, 1586, + /* 1410 */ 1585, 507, 1576, 1469, 508, 1630, 166, 171, 1660, 1419, + /* 1420 */ 966, 1417, 1415, 407, 507, 405, 1413, 411, 1630, 415, + /* 1430 */ 1411, 303, 1689, 419, 406, 270, 1661, 510, 1663, 1664, + /* 1440 */ 506, 409, 527, 410, 1702, 1689, 1676, 413, 271, 1661, + /* 1450 */ 510, 1663, 1664, 506, 508, 527, 414, 1400, 177, 1399, + /* 1460 */ 418, 417, 1386, 1471, 507, 1074, 1660, 1470, 1630, 1073, + /* 1470 */ 139, 305, 574, 576, 999, 1169, 420, 416, 412, 408, + /* 1480 */ 176, 45, 998, 178, 997, 1689, 996, 993, 271, 1661, + /* 
1490 */ 510, 1663, 1664, 506, 1676, 527, 992, 991, 1409, 288, + /* 1500 */ 1167, 1404, 508, 289, 442, 63, 1402, 290, 174, 1385, + /* 1510 */ 447, 445, 507, 1384, 449, 83, 1630, 1175, 1607, 1148, + /* 1520 */ 49, 1601, 456, 1660, 1584, 126, 1583, 1575, 199, 65, + /* 1530 */ 196, 4, 133, 1689, 201, 37, 258, 1661, 510, 1663, + /* 1540 */ 1664, 506, 204, 527, 15, 457, 43, 1316, 1309, 208, + /* 1550 */ 22, 1676, 209, 23, 210, 66, 607, 1288, 1650, 508, + /* 1560 */ 1287, 216, 1345, 42, 136, 41, 173, 1168, 165, 507, + /* 1570 */ 170, 1660, 168, 1630, 17, 1340, 1339, 16, 13, 1334, + /* 1580 */ 10, 299, 1344, 1343, 300, 1251, 19, 137, 149, 1230, + /* 1590 */ 1689, 163, 1215, 265, 1661, 510, 1663, 1664, 506, 1676, + /* 1600 */ 527, 1660, 509, 1574, 29, 515, 12, 508, 1649, 233, + /* 1610 */ 72, 1170, 1229, 20, 235, 1185, 531, 507, 238, 21, + /* 1620 */ 229, 1630, 227, 529, 1314, 964, 313, 231, 67, 1676, + /* 1630 */ 68, 1660, 1692, 1173, 1174, 1232, 526, 508, 1689, 44, + /* 1640 */ 533, 267, 1661, 510, 1663, 1664, 506, 507, 527, 1060, + /* 1650 */ 1057, 1630, 535, 538, 536, 541, 544, 1054, 539, 1676, + /* 1660 */ 1037, 1052, 1048, 542, 1069, 1046, 545, 508, 1689, 551, + /* 1670 */ 1051, 259, 1661, 510, 1663, 1664, 506, 507, 527, 1660, + /* 1680 */ 73, 1630, 74, 75, 1066, 1065, 1050, 560, 1660, 1049, + /* 1690 */ 988, 1006, 563, 240, 986, 985, 984, 983, 1689, 981, + /* 1700 */ 1067, 268, 1661, 510, 1663, 1664, 506, 1676, 527, 982, + /* 1710 */ 1003, 980, 979, 1001, 976, 508, 1676, 975, 974, 971, + /* 1720 */ 970, 969, 1416, 584, 508, 507, 585, 586, 1414, 1630, + /* 1730 */ 588, 589, 590, 1412, 507, 592, 1660, 593, 1630, 594, + /* 1740 */ 1410, 596, 597, 598, 1398, 600, 1689, 601, 1397, 260, + /* 1750 */ 1661, 510, 1663, 1664, 506, 1689, 527, 1383, 269, 1661, + /* 1760 */ 510, 1663, 1664, 506, 1676, 527, 609, 604, 605, 1358, + /* 1770 */ 1358, 1171, 508, 251, 608, 1358, 1358, 1358, 1358, 1358, + /* 1780 */ 1358, 1358, 507, 1358, 1660, 1358, 1630, 1358, 1358, 1358, + /* 1790 */ 1358, 1358, 1358, 1358, 1358, 1660, 1358, 1358, 1358, 1358, + /* 1800 */ 1358, 1358, 1358, 1689, 1358, 1358, 261, 1661, 510, 1663, + /* 1810 */ 1664, 506, 1676, 527, 1660, 1358, 1358, 1358, 1358, 1358, + /* 1820 */ 508, 1358, 1358, 1676, 1358, 1358, 1358, 1358, 1358, 1358, + /* 1830 */ 507, 508, 1358, 1358, 1630, 1358, 1358, 1358, 1358, 1358, + /* 1840 */ 1358, 507, 1676, 1358, 1358, 1630, 1358, 1358, 1358, 1358, + /* 1850 */ 508, 1689, 1358, 1358, 1672, 1661, 510, 1663, 1664, 506, + /* 1860 */ 507, 527, 1689, 1358, 1630, 1671, 1661, 510, 1663, 1664, + /* 1870 */ 506, 1358, 527, 1660, 1358, 1358, 1358, 1358, 1358, 1358, + /* 1880 */ 1358, 1689, 1660, 1358, 1670, 1661, 510, 1663, 1664, 506, + /* 1890 */ 1358, 527, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, + /* 1900 */ 1358, 1676, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 508, + /* 1910 */ 1676, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 508, 507, + /* 1920 */ 1358, 1358, 1358, 1630, 1358, 1358, 1358, 1358, 507, 1358, + /* 1930 */ 1358, 1358, 1630, 1358, 1358, 1358, 1358, 1358, 1358, 1358, + /* 1940 */ 1689, 1358, 1660, 280, 1661, 510, 1663, 1664, 506, 1689, + /* 1950 */ 527, 1660, 279, 1661, 510, 1663, 1664, 506, 1358, 527, + /* 1960 */ 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, + /* 1970 */ 1676, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 508, 1676, + /* 1980 */ 1358, 1358, 1358, 1358, 1358, 1358, 1358, 508, 507, 1358, + /* 1990 */ 1358, 1358, 1630, 1358, 1358, 1358, 1358, 507, 1358, 1660, + /* 2000 */ 1358, 1630, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 
1689, + /* 2010 */ 484, 1358, 281, 1661, 510, 1663, 1664, 506, 1689, 527, + /* 2020 */ 1358, 278, 1661, 510, 1663, 1664, 506, 1676, 527, 1358, + /* 2030 */ 1358, 1358, 1358, 1358, 1358, 508, 1358, 1358, 1358, 112, + /* 2040 */ 1358, 1358, 1358, 1358, 1358, 507, 484, 1358, 1358, 1630, + /* 2050 */ 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 488, + /* 2060 */ 1358, 1358, 1358, 1358, 1358, 1358, 1689, 1358, 1358, 264, + /* 2070 */ 1661, 510, 1663, 1664, 506, 112, 527, 110, 1358, 1358, + /* 2080 */ 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, + /* 2090 */ 1358, 1358, 215, 1736, 483, 488, 482, 1358, 1358, 1791, + /* 2100 */ 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, + /* 2110 */ 1358, 1358, 147, 110, 1358, 1358, 1788, 1358, 1358, 1358, + /* 2120 */ 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 215, 1736, + /* 2130 */ 483, 1358, 482, 1358, 1358, 1791, 1358, 1358, 1358, 1358, + /* 2140 */ 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 145, 1358, + /* 2150 */ 1358, 1358, 1788, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 270, 336, 336, 241, 244, 271, 246, 247, 274, 1, - /* 10 */ 2, 297, 12, 13, 349, 349, 2, 4, 353, 353, - /* 20 */ 20, 244, 22, 246, 247, 291, 12, 13, 14, 15, - /* 30 */ 16, 269, 2, 12, 13, 14, 15, 16, 20, 277, - /* 40 */ 306, 307, 12, 13, 14, 15, 16, 47, 238, 287, - /* 50 */ 336, 317, 240, 291, 242, 42, 43, 20, 58, 297, - /* 60 */ 12, 13, 14, 349, 64, 321, 322, 353, 20, 307, - /* 70 */ 22, 248, 310, 311, 312, 313, 314, 315, 251, 317, - /* 80 */ 80, 253, 320, 336, 20, 248, 324, 325, 20, 81, - /* 90 */ 271, 264, 57, 274, 266, 47, 349, 260, 336, 272, - /* 100 */ 353, 241, 102, 275, 267, 282, 58, 297, 12, 13, - /* 110 */ 291, 349, 64, 113, 277, 353, 20, 80, 22, 12, - /* 120 */ 13, 14, 15, 16, 14, 306, 307, 4, 80, 269, - /* 130 */ 20, 12, 13, 14, 15, 16, 317, 277, 12, 13, - /* 140 */ 14, 15, 16, 47, 80, 269, 336, 287, 80, 254, - /* 150 */ 102, 291, 276, 258, 58, 241, 156, 281, 269, 349, - /* 160 */ 64, 113, 257, 353, 259, 58, 277, 307, 268, 148, - /* 170 */ 310, 311, 312, 313, 314, 315, 80, 317, 178, 179, - /* 180 */ 280, 181, 182, 183, 184, 185, 186, 187, 188, 189, - /* 190 */ 190, 191, 192, 193, 194, 195, 89, 0, 102, 248, - /* 200 */ 81, 312, 20, 80, 156, 291, 0, 81, 208, 113, - /* 210 */ 20, 260, 297, 21, 354, 355, 24, 25, 26, 27, - /* 220 */ 28, 29, 30, 31, 32, 308, 178, 179, 277, 181, - /* 230 */ 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, - /* 240 */ 192, 193, 194, 195, 0, 208, 139, 248, 269, 332, - /* 250 */ 229, 336, 156, 249, 250, 276, 178, 60, 61, 80, - /* 260 */ 281, 269, 65, 57, 349, 68, 69, 160, 353, 72, - /* 270 */ 73, 74, 208, 281, 178, 179, 277, 181, 182, 183, - /* 280 */ 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, - /* 290 */ 194, 195, 12, 13, 0, 217, 218, 219, 220, 221, - /* 300 */ 20, 57, 22, 196, 197, 198, 199, 200, 201, 202, - /* 310 */ 203, 204, 205, 314, 22, 21, 253, 271, 24, 25, - /* 320 */ 26, 27, 28, 29, 30, 31, 32, 47, 329, 330, - /* 330 */ 331, 208, 333, 248, 20, 248, 248, 291, 275, 47, - /* 340 */ 12, 13, 14, 287, 64, 260, 290, 260, 20, 293, - /* 350 */ 22, 80, 306, 307, 267, 12, 13, 14, 15, 16, - /* 360 */ 80, 248, 277, 317, 277, 277, 12, 13, 14, 15, - /* 370 */ 16, 277, 60, 61, 271, 47, 55, 65, 284, 241, - /* 380 */ 68, 69, 102, 248, 72, 73, 74, 208, 12, 13, - /* 390 */ 277, 57, 64, 113, 291, 260, 20, 261, 22, 269, - /* 400 */ 79, 58, 314, 82, 20, 269, 276, 269, 80, 306, - /* 410 */ 307, 281, 277, 20, 278, 277, 328, 329, 330, 331, - /* 420 */ 317, 333, 143, 47, 81, 287, 155, 314, 157, 291, 
- /* 430 */ 102, 279, 89, 165, 166, 0, 156, 169, 286, 287, - /* 440 */ 64, 113, 329, 330, 331, 307, 333, 91, 310, 311, - /* 450 */ 312, 313, 314, 315, 241, 317, 80, 22, 178, 179, - /* 460 */ 151, 181, 182, 183, 184, 185, 186, 187, 188, 189, - /* 470 */ 190, 191, 192, 193, 194, 195, 35, 261, 102, 208, - /* 480 */ 171, 172, 139, 79, 156, 269, 348, 248, 208, 113, - /* 490 */ 75, 87, 213, 214, 278, 270, 248, 12, 13, 14, - /* 500 */ 15, 16, 241, 160, 291, 270, 178, 179, 260, 181, - /* 510 */ 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, - /* 520 */ 192, 193, 194, 195, 83, 277, 85, 86, 241, 88, - /* 530 */ 64, 14, 156, 92, 119, 120, 297, 20, 145, 196, - /* 540 */ 197, 198, 199, 200, 201, 202, 203, 204, 205, 241, - /* 550 */ 245, 147, 291, 248, 178, 179, 115, 181, 182, 183, - /* 560 */ 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, - /* 570 */ 194, 195, 12, 13, 18, 336, 20, 269, 291, 248, - /* 580 */ 20, 261, 22, 27, 139, 277, 30, 270, 349, 269, - /* 590 */ 92, 260, 353, 245, 92, 287, 248, 14, 278, 291, - /* 600 */ 20, 3, 22, 20, 48, 160, 251, 47, 277, 111, - /* 610 */ 112, 241, 114, 115, 116, 307, 241, 115, 310, 311, - /* 620 */ 312, 313, 314, 315, 64, 317, 241, 272, 320, 49, - /* 630 */ 145, 20, 324, 325, 326, 0, 14, 15, 16, 270, - /* 640 */ 80, 196, 93, 94, 95, 96, 97, 98, 99, 100, - /* 650 */ 101, 102, 103, 345, 105, 106, 107, 108, 109, 110, - /* 660 */ 270, 291, 102, 241, 241, 241, 291, 241, 241, 241, - /* 670 */ 241, 241, 241, 113, 118, 241, 291, 121, 122, 123, - /* 680 */ 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, - /* 690 */ 134, 135, 136, 137, 138, 60, 61, 62, 63, 182, - /* 700 */ 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, - /* 710 */ 75, 76, 77, 291, 291, 291, 156, 291, 291, 291, - /* 720 */ 291, 291, 291, 241, 37, 291, 269, 241, 255, 256, - /* 730 */ 241, 255, 256, 286, 287, 278, 42, 43, 178, 179, - /* 740 */ 308, 181, 182, 183, 184, 185, 186, 187, 188, 189, - /* 750 */ 190, 191, 192, 193, 194, 195, 145, 18, 269, 248, - /* 760 */ 4, 241, 23, 308, 332, 182, 277, 47, 22, 248, - /* 770 */ 20, 260, 0, 291, 35, 36, 287, 291, 39, 287, - /* 780 */ 291, 260, 206, 207, 64, 293, 297, 332, 277, 269, - /* 790 */ 167, 168, 41, 47, 248, 56, 307, 277, 277, 310, - /* 800 */ 311, 312, 313, 314, 315, 262, 317, 287, 265, 320, - /* 810 */ 64, 291, 248, 324, 325, 41, 300, 45, 84, 80, - /* 820 */ 47, 87, 224, 277, 260, 336, 84, 307, 41, 87, - /* 830 */ 310, 311, 312, 313, 314, 315, 44, 317, 349, 248, - /* 840 */ 320, 277, 353, 297, 324, 325, 326, 84, 102, 84, - /* 850 */ 87, 260, 87, 21, 58, 81, 117, 241, 338, 113, - /* 860 */ 314, 248, 248, 270, 344, 345, 34, 47, 277, 145, - /* 870 */ 146, 41, 80, 260, 260, 329, 330, 331, 0, 333, - /* 880 */ 1, 2, 336, 0, 64, 269, 113, 148, 149, 150, - /* 890 */ 277, 277, 153, 277, 207, 349, 242, 158, 41, 353, - /* 900 */ 22, 47, 156, 287, 41, 22, 41, 291, 258, 170, - /* 910 */ 41, 81, 173, 41, 175, 176, 177, 41, 193, 194, - /* 920 */ 241, 0, 41, 307, 178, 179, 310, 311, 312, 313, - /* 930 */ 314, 315, 182, 317, 356, 41, 320, 347, 81, 80, - /* 940 */ 324, 325, 326, 304, 81, 41, 81, 208, 269, 90, - /* 950 */ 81, 335, 269, 81, 247, 41, 277, 81, 41, 341, - /* 960 */ 249, 33, 81, 334, 280, 209, 287, 113, 309, 350, - /* 970 */ 291, 41, 241, 45, 178, 81, 350, 226, 350, 51, - /* 980 */ 52, 53, 54, 55, 41, 81, 307, 41, 41, 310, - /* 990 */ 311, 312, 313, 314, 315, 81, 317, 0, 81, 320, - /* 1000 */ 269, 337, 20, 324, 325, 326, 248, 79, 277, 45, - /* 1010 */ 82, 81, 47, 92, 335, 228, 255, 305, 287, 298, - /* 1020 */ 154, 40, 291, 248, 81, 248, 285, 81, 81, 139, - /* 1030 
*/ 283, 241, 111, 112, 248, 114, 115, 116, 307, 20, - /* 1040 */ 283, 310, 311, 312, 313, 314, 315, 4, 317, 243, - /* 1050 */ 243, 320, 20, 253, 302, 324, 325, 326, 287, 269, - /* 1060 */ 20, 253, 19, 295, 253, 277, 335, 277, 140, 20, - /* 1070 */ 142, 288, 144, 248, 253, 253, 33, 287, 243, 253, - /* 1080 */ 64, 291, 241, 269, 269, 269, 269, 297, 45, 92, - /* 1090 */ 248, 302, 164, 50, 243, 291, 269, 307, 55, 241, - /* 1100 */ 310, 311, 312, 313, 314, 315, 269, 317, 111, 112, - /* 1110 */ 269, 114, 115, 116, 269, 269, 269, 269, 277, 269, - /* 1120 */ 163, 251, 79, 287, 301, 82, 336, 269, 287, 295, - /* 1130 */ 277, 251, 291, 251, 20, 277, 251, 288, 297, 349, - /* 1140 */ 309, 216, 215, 353, 346, 287, 223, 292, 307, 291, - /* 1150 */ 211, 310, 311, 312, 313, 314, 315, 241, 317, 291, - /* 1160 */ 222, 210, 343, 292, 346, 307, 291, 339, 310, 311, - /* 1170 */ 312, 313, 314, 315, 207, 317, 277, 336, 320, 291, - /* 1180 */ 342, 20, 324, 325, 0, 269, 40, 327, 227, 241, - /* 1190 */ 349, 352, 340, 277, 353, 308, 230, 225, 80, 292, - /* 1200 */ 357, 291, 291, 287, 292, 142, 291, 291, 24, 25, - /* 1210 */ 26, 27, 28, 29, 30, 31, 32, 269, 351, 248, - /* 1220 */ 323, 289, 277, 307, 351, 277, 310, 311, 312, 313, - /* 1230 */ 314, 315, 288, 317, 251, 287, 320, 251, 352, 291, - /* 1240 */ 324, 325, 241, 352, 351, 265, 277, 80, 277, 273, - /* 1250 */ 259, 248, 243, 251, 299, 307, 303, 263, 310, 311, - /* 1260 */ 312, 313, 314, 315, 316, 317, 318, 319, 297, 296, - /* 1270 */ 269, 263, 252, 263, 239, 0, 0, 72, 277, 0, - /* 1280 */ 47, 174, 47, 47, 47, 314, 174, 0, 287, 47, - /* 1290 */ 47, 174, 291, 0, 47, 0, 47, 0, 47, 241, - /* 1300 */ 329, 330, 331, 0, 333, 80, 159, 336, 307, 160, - /* 1310 */ 113, 310, 311, 312, 313, 314, 315, 0, 317, 156, - /* 1320 */ 349, 320, 0, 152, 353, 151, 325, 269, 19, 0, - /* 1330 */ 0, 0, 44, 0, 0, 277, 0, 0, 0, 0, - /* 1340 */ 0, 0, 33, 22, 40, 287, 241, 0, 0, 291, - /* 1350 */ 0, 0, 294, 0, 45, 0, 0, 0, 0, 0, - /* 1360 */ 51, 52, 53, 54, 55, 307, 0, 0, 310, 311, - /* 1370 */ 312, 313, 314, 315, 269, 317, 0, 0, 0, 0, - /* 1380 */ 0, 0, 277, 0, 0, 241, 0, 41, 79, 40, - /* 1390 */ 0, 82, 287, 0, 0, 0, 291, 37, 44, 14, - /* 1400 */ 38, 14, 37, 0, 0, 44, 37, 0, 0, 45, - /* 1410 */ 47, 59, 307, 269, 0, 310, 311, 312, 313, 314, - /* 1420 */ 315, 277, 317, 0, 0, 116, 37, 0, 37, 47, - /* 1430 */ 0, 287, 87, 45, 37, 291, 241, 37, 294, 47, - /* 1440 */ 0, 45, 0, 0, 45, 47, 0, 22, 47, 47, - /* 1450 */ 141, 307, 47, 144, 310, 311, 312, 313, 314, 315, - /* 1460 */ 355, 317, 89, 47, 269, 241, 47, 41, 0, 41, - /* 1470 */ 22, 162, 277, 164, 22, 0, 47, 22, 47, 0, - /* 1480 */ 47, 47, 287, 48, 22, 0, 291, 22, 0, 20, - /* 1490 */ 0, 47, 22, 269, 0, 0, 161, 241, 0, 41, - /* 1500 */ 80, 277, 307, 37, 41, 310, 311, 312, 313, 314, - /* 1510 */ 315, 287, 317, 145, 319, 291, 145, 142, 294, 212, - /* 1520 */ 81, 41, 81, 41, 206, 269, 81, 80, 44, 80, - /* 1530 */ 80, 307, 41, 277, 310, 311, 312, 313, 314, 315, - /* 1540 */ 44, 317, 81, 287, 80, 44, 140, 291, 81, 41, - /* 1550 */ 294, 44, 81, 241, 212, 41, 212, 81, 47, 2, - /* 1560 */ 47, 41, 47, 307, 47, 47, 310, 311, 312, 313, - /* 1570 */ 314, 315, 47, 317, 81, 44, 44, 80, 22, 143, - /* 1580 */ 81, 269, 80, 80, 241, 80, 80, 178, 0, 277, - /* 1590 */ 180, 81, 37, 140, 90, 44, 44, 81, 22, 287, - /* 1600 */ 81, 80, 80, 291, 80, 47, 80, 80, 80, 47, - /* 1610 */ 81, 81, 269, 241, 91, 80, 47, 80, 47, 307, - /* 1620 */ 277, 81, 310, 311, 312, 313, 314, 315, 80, 317, - /* 1630 */ 287, 81, 47, 80, 291, 81, 47, 104, 104, 80, - /* 1640 */ 104, 269, 104, 22, 
80, 80, 47, 80, 47, 277, - /* 1650 */ 307, 22, 92, 310, 311, 312, 313, 314, 315, 287, - /* 1660 */ 317, 59, 241, 291, 58, 78, 41, 64, 12, 13, - /* 1670 */ 47, 47, 22, 241, 113, 47, 64, 47, 22, 307, - /* 1680 */ 47, 47, 310, 311, 312, 313, 314, 315, 47, 317, - /* 1690 */ 269, 47, 47, 47, 47, 47, 47, 0, 277, 47, - /* 1700 */ 47, 269, 45, 47, 37, 0, 47, 45, 287, 277, - /* 1710 */ 37, 0, 291, 47, 45, 37, 0, 47, 37, 287, - /* 1720 */ 64, 45, 0, 291, 241, 0, 47, 0, 307, 46, - /* 1730 */ 22, 310, 311, 312, 313, 314, 315, 22, 317, 307, - /* 1740 */ 21, 20, 310, 311, 312, 313, 314, 315, 22, 317, - /* 1750 */ 21, 358, 269, 241, 358, 358, 358, 358, 102, 358, - /* 1760 */ 277, 358, 358, 358, 358, 358, 358, 358, 358, 113, - /* 1770 */ 287, 358, 358, 358, 291, 358, 358, 358, 358, 358, - /* 1780 */ 358, 269, 358, 358, 358, 241, 358, 358, 358, 277, - /* 1790 */ 307, 358, 358, 310, 311, 312, 313, 314, 315, 287, - /* 1800 */ 317, 358, 358, 291, 358, 358, 358, 358, 358, 358, - /* 1810 */ 358, 358, 156, 269, 358, 358, 358, 358, 358, 307, - /* 1820 */ 358, 277, 310, 311, 312, 313, 314, 315, 358, 317, - /* 1830 */ 358, 287, 358, 358, 178, 291, 358, 358, 358, 358, - /* 1840 */ 358, 241, 358, 358, 358, 189, 190, 191, 358, 358, - /* 1850 */ 358, 307, 358, 358, 310, 311, 312, 313, 314, 315, - /* 1860 */ 358, 317, 358, 358, 358, 358, 358, 358, 358, 269, - /* 1870 */ 358, 358, 241, 358, 358, 358, 358, 277, 358, 358, - /* 1880 */ 358, 358, 358, 358, 358, 358, 358, 287, 358, 358, - /* 1890 */ 358, 291, 358, 358, 358, 358, 358, 358, 358, 358, - /* 1900 */ 269, 241, 358, 358, 358, 358, 358, 307, 277, 358, - /* 1910 */ 310, 311, 312, 313, 314, 315, 358, 317, 287, 358, - /* 1920 */ 358, 358, 291, 358, 358, 358, 358, 358, 358, 269, - /* 1930 */ 358, 358, 241, 358, 358, 358, 358, 277, 307, 358, - /* 1940 */ 358, 310, 311, 312, 313, 314, 315, 287, 317, 358, - /* 1950 */ 358, 291, 358, 358, 358, 358, 358, 358, 358, 358, - /* 1960 */ 269, 241, 358, 358, 358, 358, 358, 307, 277, 358, - /* 1970 */ 310, 311, 312, 313, 314, 315, 358, 317, 287, 358, - /* 1980 */ 358, 358, 291, 358, 358, 358, 358, 358, 358, 269, - /* 1990 */ 358, 358, 358, 358, 358, 358, 358, 277, 307, 358, - /* 2000 */ 358, 310, 311, 312, 313, 314, 315, 287, 317, 358, - /* 2010 */ 358, 291, 241, 358, 358, 358, 358, 358, 358, 358, - /* 2020 */ 358, 358, 358, 241, 358, 358, 358, 307, 358, 358, - /* 2030 */ 310, 311, 312, 313, 314, 315, 358, 317, 358, 358, - /* 2040 */ 269, 358, 358, 358, 358, 358, 358, 358, 277, 358, - /* 2050 */ 358, 269, 358, 358, 358, 358, 358, 358, 287, 277, - /* 2060 */ 358, 358, 291, 358, 358, 358, 358, 358, 358, 287, - /* 2070 */ 358, 358, 358, 291, 358, 358, 358, 241, 307, 358, - /* 2080 */ 358, 310, 311, 312, 313, 314, 315, 358, 317, 307, - /* 2090 */ 358, 358, 310, 311, 312, 313, 314, 315, 358, 317, - /* 2100 */ 358, 358, 358, 358, 358, 269, 358, 358, 358, 358, - /* 2110 */ 358, 358, 358, 277, 358, 358, 358, 358, 358, 358, - /* 2120 */ 358, 358, 358, 287, 358, 358, 358, 291, 358, 358, - /* 2130 */ 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, - /* 2140 */ 358, 358, 358, 307, 358, 358, 310, 311, 312, 313, - /* 2150 */ 314, 315, 358, 317, + /* 0 */ 246, 273, 248, 249, 276, 246, 250, 248, 249, 324, + /* 10 */ 325, 4, 12, 13, 242, 271, 244, 243, 262, 339, + /* 20 */ 20, 293, 22, 279, 20, 269, 12, 13, 14, 15, + /* 30 */ 16, 247, 352, 255, 250, 279, 356, 309, 310, 12, + /* 40 */ 13, 14, 15, 16, 271, 271, 268, 47, 320, 42, + /* 50 */ 43, 278, 47, 279, 250, 277, 283, 55, 58, 315, + /* 60 */ 12, 13, 14, 289, 64, 2, 262, 293, 20, 64, + /* 70 */ 
22, 0, 58, 299, 20, 12, 13, 14, 15, 16, + /* 80 */ 4, 81, 80, 279, 310, 83, 250, 313, 314, 315, + /* 90 */ 316, 317, 318, 0, 320, 47, 82, 323, 262, 251, + /* 100 */ 252, 327, 328, 103, 90, 269, 58, 12, 13, 14, + /* 110 */ 15, 16, 64, 339, 114, 279, 270, 24, 25, 26, + /* 120 */ 27, 28, 29, 30, 31, 32, 352, 75, 282, 81, + /* 130 */ 356, 60, 61, 62, 63, 81, 65, 66, 67, 68, + /* 140 */ 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, + /* 150 */ 289, 103, 279, 292, 140, 81, 295, 81, 158, 286, + /* 160 */ 21, 339, 114, 24, 25, 26, 27, 28, 29, 30, + /* 170 */ 31, 32, 120, 121, 352, 250, 162, 150, 356, 247, + /* 180 */ 180, 181, 250, 183, 184, 185, 186, 187, 188, 189, + /* 190 */ 190, 191, 192, 193, 194, 195, 196, 197, 0, 12, + /* 200 */ 13, 14, 15, 16, 167, 168, 158, 256, 171, 284, + /* 210 */ 210, 260, 198, 199, 200, 201, 202, 203, 204, 205, + /* 220 */ 206, 207, 0, 12, 13, 14, 15, 16, 180, 181, + /* 230 */ 250, 183, 184, 185, 186, 187, 188, 189, 190, 191, + /* 240 */ 192, 193, 194, 195, 196, 197, 12, 13, 0, 20, + /* 250 */ 253, 22, 60, 61, 20, 57, 22, 65, 231, 279, + /* 260 */ 68, 69, 263, 339, 72, 73, 74, 12, 13, 82, + /* 270 */ 271, 274, 243, 57, 20, 20, 352, 22, 49, 280, + /* 280 */ 356, 47, 60, 61, 210, 20, 210, 65, 255, 47, + /* 290 */ 68, 69, 58, 82, 72, 73, 74, 317, 64, 0, + /* 300 */ 271, 14, 47, 243, 243, 250, 64, 20, 279, 250, + /* 310 */ 277, 331, 332, 333, 334, 81, 336, 262, 289, 64, + /* 320 */ 21, 262, 293, 24, 25, 26, 27, 28, 29, 30, + /* 330 */ 31, 32, 271, 243, 279, 81, 81, 103, 279, 310, + /* 340 */ 279, 93, 313, 314, 315, 316, 317, 318, 114, 320, + /* 350 */ 289, 20, 323, 293, 293, 271, 327, 328, 103, 271, + /* 360 */ 112, 113, 278, 115, 116, 117, 243, 283, 280, 114, + /* 370 */ 259, 310, 261, 271, 313, 314, 315, 316, 317, 318, + /* 380 */ 278, 320, 250, 293, 323, 283, 311, 2, 327, 328, + /* 390 */ 329, 20, 158, 3, 262, 257, 258, 12, 13, 14, + /* 400 */ 15, 16, 341, 12, 13, 14, 15, 16, 347, 348, + /* 410 */ 335, 279, 81, 158, 180, 181, 293, 183, 184, 185, + /* 420 */ 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, + /* 430 */ 196, 197, 250, 257, 258, 180, 181, 243, 183, 184, + /* 440 */ 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, + /* 450 */ 195, 196, 197, 12, 13, 14, 243, 4, 20, 19, + /* 460 */ 281, 20, 243, 22, 210, 210, 264, 288, 289, 267, + /* 470 */ 14, 184, 19, 33, 12, 13, 20, 273, 243, 243, + /* 480 */ 273, 299, 20, 276, 22, 45, 33, 293, 47, 250, + /* 490 */ 144, 51, 52, 53, 54, 55, 14, 293, 45, 240, + /* 500 */ 293, 262, 20, 50, 57, 64, 293, 271, 55, 47, + /* 510 */ 153, 250, 293, 309, 310, 279, 309, 310, 279, 250, + /* 520 */ 80, 339, 81, 83, 320, 289, 64, 320, 293, 293, + /* 530 */ 173, 174, 64, 80, 352, 299, 83, 146, 356, 289, + /* 540 */ 279, 210, 41, 81, 103, 295, 310, 20, 279, 313, + /* 550 */ 314, 315, 316, 317, 318, 114, 320, 117, 299, 323, + /* 560 */ 243, 215, 216, 327, 328, 103, 12, 13, 14, 15, + /* 570 */ 16, 273, 80, 263, 272, 339, 114, 93, 317, 92, + /* 580 */ 88, 271, 142, 82, 146, 145, 317, 0, 352, 81, + /* 590 */ 280, 293, 356, 332, 333, 334, 253, 336, 339, 158, + /* 600 */ 116, 332, 333, 334, 164, 336, 166, 309, 310, 266, + /* 610 */ 293, 352, 58, 288, 289, 356, 226, 274, 320, 4, + /* 620 */ 158, 180, 181, 243, 183, 184, 185, 186, 187, 188, + /* 630 */ 189, 190, 191, 192, 193, 194, 195, 196, 197, 35, + /* 640 */ 184, 149, 180, 181, 90, 183, 184, 185, 186, 187, + /* 650 */ 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, + /* 660 */ 12, 13, 18, 93, 20, 157, 180, 159, 20, 250, + /* 670 */ 22, 27, 250, 293, 30, 14, 15, 16, 208, 209, + /* 680 */ 93, 
262, 112, 113, 262, 115, 116, 117, 84, 37, + /* 690 */ 86, 87, 48, 89, 140, 47, 22, 93, 279, 112, + /* 700 */ 113, 279, 115, 116, 117, 219, 220, 221, 222, 223, + /* 710 */ 271, 263, 64, 1, 2, 243, 162, 311, 210, 271, + /* 720 */ 116, 47, 283, 42, 43, 311, 169, 170, 280, 81, + /* 730 */ 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, + /* 740 */ 104, 335, 106, 107, 108, 109, 110, 111, 299, 335, + /* 750 */ 243, 103, 198, 199, 200, 201, 202, 203, 204, 205, + /* 760 */ 206, 207, 114, 119, 0, 293, 122, 123, 124, 125, + /* 770 */ 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, + /* 780 */ 136, 137, 138, 139, 250, 250, 18, 250, 339, 20, + /* 790 */ 41, 23, 20, 20, 82, 140, 262, 262, 272, 262, + /* 800 */ 293, 352, 243, 35, 36, 356, 158, 39, 243, 243, + /* 810 */ 243, 299, 243, 279, 279, 243, 279, 162, 250, 243, + /* 820 */ 243, 57, 243, 146, 56, 148, 211, 272, 180, 181, + /* 830 */ 262, 183, 184, 185, 186, 187, 188, 189, 190, 191, + /* 840 */ 192, 193, 194, 195, 196, 197, 41, 279, 271, 81, + /* 850 */ 272, 339, 293, 198, 0, 272, 279, 0, 293, 293, + /* 860 */ 293, 209, 293, 0, 352, 293, 289, 0, 356, 293, + /* 870 */ 293, 85, 293, 85, 88, 85, 88, 85, 88, 22, + /* 880 */ 88, 243, 58, 21, 272, 22, 118, 310, 81, 22, + /* 890 */ 313, 314, 315, 316, 317, 318, 34, 320, 91, 45, + /* 900 */ 323, 1, 2, 272, 327, 328, 329, 244, 303, 271, + /* 910 */ 195, 196, 44, 260, 243, 146, 47, 279, 150, 151, + /* 920 */ 152, 41, 41, 155, 350, 348, 47, 289, 160, 41, + /* 930 */ 359, 293, 41, 41, 41, 307, 344, 271, 249, 41, + /* 940 */ 172, 251, 271, 175, 282, 177, 178, 179, 310, 81, + /* 950 */ 279, 313, 314, 315, 316, 317, 318, 184, 320, 312, + /* 960 */ 289, 323, 82, 82, 293, 327, 328, 329, 20, 41, + /* 970 */ 82, 41, 337, 82, 82, 82, 338, 228, 210, 243, + /* 980 */ 82, 310, 353, 114, 313, 314, 315, 316, 317, 318, + /* 990 */ 353, 320, 41, 114, 323, 243, 41, 41, 327, 328, + /* 1000 */ 329, 250, 41, 353, 180, 340, 41, 271, 45, 338, + /* 1010 */ 82, 308, 82, 47, 257, 279, 41, 41, 301, 40, + /* 1020 */ 156, 250, 287, 271, 250, 289, 140, 243, 285, 293, + /* 1030 */ 285, 279, 250, 82, 20, 230, 245, 82, 82, 245, + /* 1040 */ 20, 289, 305, 82, 255, 293, 310, 82, 289, 313, + /* 1050 */ 314, 315, 316, 317, 318, 271, 320, 82, 82, 323, + /* 1060 */ 20, 255, 310, 279, 328, 313, 314, 315, 316, 317, + /* 1070 */ 318, 20, 320, 289, 297, 323, 300, 293, 243, 327, + /* 1080 */ 328, 329, 255, 299, 297, 255, 20, 279, 290, 255, + /* 1090 */ 338, 255, 250, 243, 310, 255, 245, 313, 314, 315, + /* 1100 */ 316, 317, 318, 271, 320, 250, 271, 64, 271, 271, + /* 1110 */ 271, 245, 271, 293, 279, 271, 271, 271, 271, 271, + /* 1120 */ 271, 271, 305, 339, 289, 253, 304, 253, 293, 279, + /* 1130 */ 297, 253, 165, 290, 299, 279, 352, 253, 20, 289, + /* 1140 */ 356, 289, 218, 293, 243, 310, 349, 294, 313, 314, + /* 1150 */ 315, 316, 317, 318, 217, 320, 225, 346, 293, 349, + /* 1160 */ 310, 345, 224, 313, 314, 315, 316, 317, 318, 294, + /* 1170 */ 320, 312, 271, 323, 339, 293, 293, 327, 328, 20, + /* 1180 */ 279, 213, 212, 209, 279, 40, 229, 352, 311, 360, + /* 1190 */ 289, 356, 232, 81, 293, 293, 243, 293, 227, 293, + /* 1200 */ 12, 13, 294, 343, 294, 243, 330, 143, 291, 290, + /* 1210 */ 22, 310, 342, 355, 313, 314, 315, 316, 317, 318, + /* 1220 */ 319, 320, 321, 322, 271, 354, 279, 81, 355, 326, + /* 1230 */ 253, 267, 279, 271, 253, 47, 250, 279, 253, 354, + /* 1240 */ 245, 279, 289, 261, 355, 302, 293, 275, 354, 265, + /* 1250 */ 306, 289, 64, 243, 265, 293, 265, 298, 296, 254, + /* 1260 */ 241, 0, 0, 310, 72, 0, 313, 314, 315, 316, + /* 1270 */ 317, 
318, 310, 320, 47, 313, 314, 315, 316, 317, + /* 1280 */ 318, 271, 320, 176, 47, 243, 47, 47, 0, 279, + /* 1290 */ 176, 103, 47, 47, 0, 176, 47, 0, 47, 289, + /* 1300 */ 0, 47, 114, 293, 0, 81, 162, 114, 161, 158, + /* 1310 */ 357, 358, 0, 271, 0, 154, 153, 243, 0, 0, + /* 1320 */ 310, 279, 44, 313, 314, 315, 316, 317, 318, 0, + /* 1330 */ 320, 289, 0, 0, 0, 293, 0, 0, 0, 0, + /* 1340 */ 0, 0, 0, 0, 0, 271, 158, 0, 0, 0, + /* 1350 */ 0, 0, 310, 279, 0, 313, 314, 315, 316, 317, + /* 1360 */ 318, 351, 320, 289, 40, 243, 0, 293, 180, 0, + /* 1370 */ 296, 0, 0, 0, 22, 0, 0, 0, 243, 191, + /* 1380 */ 192, 193, 0, 0, 310, 0, 40, 313, 314, 315, + /* 1390 */ 316, 317, 318, 271, 320, 14, 37, 41, 14, 44, + /* 1400 */ 358, 279, 0, 38, 44, 37, 271, 0, 0, 0, + /* 1410 */ 0, 289, 0, 0, 279, 293, 37, 37, 243, 0, + /* 1420 */ 59, 0, 0, 37, 289, 47, 0, 37, 293, 37, + /* 1430 */ 0, 296, 310, 37, 45, 313, 314, 315, 316, 317, + /* 1440 */ 318, 47, 320, 45, 322, 310, 271, 47, 313, 314, + /* 1450 */ 315, 316, 317, 318, 279, 320, 45, 0, 33, 0, + /* 1460 */ 45, 47, 0, 0, 289, 47, 243, 0, 293, 22, + /* 1470 */ 45, 296, 41, 41, 47, 22, 51, 52, 53, 54, + /* 1480 */ 55, 90, 47, 88, 47, 310, 47, 47, 313, 314, + /* 1490 */ 315, 316, 317, 318, 271, 320, 47, 47, 0, 22, + /* 1500 */ 47, 0, 279, 22, 48, 80, 0, 22, 83, 0, + /* 1510 */ 22, 47, 289, 0, 22, 20, 293, 64, 0, 47, + /* 1520 */ 146, 0, 22, 243, 0, 163, 0, 0, 37, 81, + /* 1530 */ 143, 41, 81, 310, 141, 41, 313, 314, 315, 316, + /* 1540 */ 317, 318, 82, 320, 214, 146, 41, 82, 82, 81, + /* 1550 */ 81, 271, 41, 41, 44, 81, 103, 82, 44, 279, + /* 1560 */ 82, 44, 82, 41, 44, 208, 141, 114, 143, 289, + /* 1570 */ 145, 243, 147, 293, 41, 47, 47, 214, 214, 82, + /* 1580 */ 2, 47, 47, 47, 47, 180, 41, 44, 44, 82, + /* 1590 */ 310, 166, 22, 313, 314, 315, 316, 317, 318, 271, + /* 1600 */ 320, 243, 182, 0, 81, 144, 81, 279, 44, 37, + /* 1610 */ 91, 158, 82, 81, 141, 22, 47, 289, 44, 81, + /* 1620 */ 81, 293, 82, 92, 82, 59, 47, 81, 81, 271, + /* 1630 */ 81, 243, 81, 180, 181, 82, 81, 279, 310, 81, + /* 1640 */ 81, 313, 314, 315, 316, 317, 318, 289, 320, 82, + /* 1650 */ 82, 293, 47, 47, 81, 47, 47, 82, 81, 271, + /* 1660 */ 22, 105, 82, 81, 47, 82, 81, 279, 310, 93, + /* 1670 */ 105, 313, 314, 315, 316, 317, 318, 289, 320, 243, + /* 1680 */ 81, 293, 81, 81, 47, 22, 105, 58, 243, 105, + /* 1690 */ 47, 64, 79, 41, 47, 47, 47, 47, 310, 22, + /* 1700 */ 114, 313, 314, 315, 316, 317, 318, 271, 320, 47, + /* 1710 */ 64, 47, 47, 47, 47, 279, 271, 47, 47, 47, + /* 1720 */ 47, 47, 0, 47, 279, 289, 45, 37, 0, 293, + /* 1730 */ 47, 45, 37, 0, 289, 47, 243, 45, 293, 37, + /* 1740 */ 0, 47, 45, 37, 0, 47, 310, 46, 0, 313, + /* 1750 */ 314, 315, 316, 317, 318, 310, 320, 0, 313, 314, + /* 1760 */ 315, 316, 317, 318, 271, 320, 20, 22, 21, 361, + /* 1770 */ 361, 22, 279, 22, 21, 361, 361, 361, 361, 361, + /* 1780 */ 361, 361, 289, 361, 243, 361, 293, 361, 361, 361, + /* 1790 */ 361, 361, 361, 361, 361, 243, 361, 361, 361, 361, + /* 1800 */ 361, 361, 361, 310, 361, 361, 313, 314, 315, 316, + /* 1810 */ 317, 318, 271, 320, 243, 361, 361, 361, 361, 361, + /* 1820 */ 279, 361, 361, 271, 361, 361, 361, 361, 361, 361, + /* 1830 */ 289, 279, 361, 361, 293, 361, 361, 361, 361, 361, + /* 1840 */ 361, 289, 271, 361, 361, 293, 361, 361, 361, 361, + /* 1850 */ 279, 310, 361, 361, 313, 314, 315, 316, 317, 318, + /* 1860 */ 289, 320, 310, 361, 293, 313, 314, 315, 316, 317, + /* 1870 */ 318, 361, 320, 243, 361, 361, 361, 361, 361, 361, + /* 1880 */ 361, 310, 243, 361, 313, 314, 315, 316, 317, 318, + /* 1890 
*/ 361, 320, 361, 361, 361, 361, 361, 361, 361, 361, + /* 1900 */ 361, 271, 361, 361, 361, 361, 361, 361, 361, 279, + /* 1910 */ 271, 361, 361, 361, 361, 361, 361, 361, 279, 289, + /* 1920 */ 361, 361, 361, 293, 361, 361, 361, 361, 289, 361, + /* 1930 */ 361, 361, 293, 361, 361, 361, 361, 361, 361, 361, + /* 1940 */ 310, 361, 243, 313, 314, 315, 316, 317, 318, 310, + /* 1950 */ 320, 243, 313, 314, 315, 316, 317, 318, 361, 320, + /* 1960 */ 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, + /* 1970 */ 271, 361, 361, 361, 361, 361, 361, 361, 279, 271, + /* 1980 */ 361, 361, 361, 361, 361, 361, 361, 279, 289, 361, + /* 1990 */ 361, 361, 293, 361, 361, 361, 361, 289, 361, 243, + /* 2000 */ 361, 293, 361, 361, 361, 361, 361, 361, 361, 310, + /* 2010 */ 250, 361, 313, 314, 315, 316, 317, 318, 310, 320, + /* 2020 */ 361, 313, 314, 315, 316, 317, 318, 271, 320, 361, + /* 2030 */ 361, 361, 361, 361, 361, 279, 361, 361, 361, 279, + /* 2040 */ 361, 361, 361, 361, 361, 289, 250, 361, 361, 293, + /* 2050 */ 361, 361, 361, 361, 361, 361, 361, 361, 361, 299, + /* 2060 */ 361, 361, 361, 361, 361, 361, 310, 361, 361, 313, + /* 2070 */ 314, 315, 316, 317, 318, 279, 320, 317, 361, 361, + /* 2080 */ 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, + /* 2090 */ 361, 361, 332, 333, 334, 299, 336, 361, 361, 339, + /* 2100 */ 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, + /* 2110 */ 361, 361, 352, 317, 361, 361, 356, 361, 361, 361, + /* 2120 */ 361, 361, 361, 361, 361, 361, 361, 361, 332, 333, + /* 2130 */ 334, 361, 336, 361, 361, 339, 361, 361, 361, 361, + /* 2140 */ 361, 361, 361, 361, 361, 361, 361, 361, 352, 361, + /* 2150 */ 361, 361, 356, }; -#define YY_SHIFT_COUNT (604) +#define YY_SHIFT_COUNT (610) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (1729) +#define YY_SHIFT_MAX (1757) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 739, 0, 0, 48, 96, 96, 96, 96, 280, 280, - /* 10 */ 96, 96, 328, 376, 560, 376, 376, 376, 376, 376, - /* 20 */ 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, - /* 30 */ 376, 376, 376, 376, 376, 376, 376, 376, 37, 37, - /* 40 */ 68, 68, 68, 1656, 1656, 1656, 1656, 64, 271, 179, - /* 50 */ 18, 18, 13, 13, 123, 179, 179, 18, 18, 18, - /* 60 */ 18, 18, 18, 35, 18, 182, 190, 314, 182, 18, - /* 70 */ 18, 182, 18, 182, 182, 314, 182, 18, 334, 556, - /* 80 */ 343, 107, 107, 192, 312, 746, 746, 746, 746, 746, - /* 90 */ 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, - /* 100 */ 746, 746, 746, 746, 441, 580, 110, 110, 206, 720, - /* 110 */ 393, 393, 393, 244, 720, 384, 314, 182, 182, 314, - /* 120 */ 356, 466, 549, 549, 549, 549, 549, 549, 549, 1309, - /* 130 */ 294, 197, 21, 78, 268, 279, 517, 583, 694, 292, - /* 140 */ 502, 611, 576, 687, 576, 598, 598, 598, 756, 750, - /* 150 */ 982, 964, 965, 866, 982, 982, 981, 890, 890, 982, - /* 160 */ 1019, 1019, 1032, 35, 314, 35, 1040, 35, 384, 1049, - /* 170 */ 35, 35, 982, 35, 1019, 182, 182, 182, 182, 182, - /* 180 */ 182, 182, 182, 182, 182, 182, 982, 1019, 1016, 1032, - /* 190 */ 334, 957, 314, 334, 1040, 334, 384, 1049, 334, 1114, - /* 200 */ 925, 927, 1016, 925, 927, 1016, 1016, 182, 923, 938, - /* 210 */ 939, 951, 967, 384, 1161, 1146, 961, 972, 966, 961, - /* 220 */ 972, 961, 972, 1118, 927, 1016, 1016, 927, 1016, 1063, - /* 230 */ 384, 1049, 334, 356, 334, 384, 1167, 466, 982, 334, - /* 240 */ 1019, 2154, 2154, 2154, 2154, 2154, 2154, 2154, 2154, 635, - /* 250 */ 928, 1184, 1043, 921, 997, 119, 14, 30, 485, 126, - /* 260 */ 498, 354, 354, 354, 354, 354, 354, 354, 354, 309, - /* 270 */ 321, 415, 404, 8, 445, 622, 
622, 622, 622, 772, - /* 280 */ 774, 734, 742, 763, 765, 435, 878, 883, 832, 623, - /* 290 */ 724, 830, 857, 863, 879, 725, 751, 787, 865, 796, - /* 300 */ 869, 792, 872, 876, 881, 894, 904, 773, 854, 914, - /* 310 */ 917, 930, 943, 946, 947, 859, 820, 1275, 1276, 1205, - /* 320 */ 1279, 1233, 1107, 1235, 1236, 1237, 1112, 1287, 1242, 1243, - /* 330 */ 1117, 1293, 1247, 1295, 1249, 1297, 1251, 1303, 1225, 1149, - /* 340 */ 1147, 1197, 1163, 1317, 1322, 1171, 1174, 1329, 1330, 1288, - /* 350 */ 1331, 1333, 1334, 1336, 1337, 1338, 1339, 1340, 1341, 1347, - /* 360 */ 1348, 1350, 1351, 1353, 1355, 1356, 1357, 1358, 1304, 1359, - /* 370 */ 1366, 1367, 1376, 1377, 1378, 1321, 1379, 1380, 1381, 1383, - /* 380 */ 1384, 1386, 1349, 1360, 1346, 1385, 1354, 1387, 1361, 1390, - /* 390 */ 1362, 1365, 1393, 1394, 1395, 1403, 1369, 1404, 1352, 1407, - /* 400 */ 1408, 1363, 1364, 1389, 1414, 1382, 1388, 1391, 1423, 1392, - /* 410 */ 1396, 1397, 1424, 1398, 1399, 1400, 1427, 1430, 1440, 1442, - /* 420 */ 1373, 1345, 1401, 1425, 1443, 1402, 1405, 1416, 1419, 1426, - /* 430 */ 1428, 1429, 1431, 1433, 1446, 1448, 1468, 1452, 1435, 1475, - /* 440 */ 1455, 1434, 1479, 1462, 1485, 1465, 1469, 1488, 1368, 1444, - /* 450 */ 1490, 1335, 1470, 1371, 1375, 1494, 1495, 1498, 1420, 1466, - /* 460 */ 1406, 1458, 1463, 1307, 1439, 1480, 1441, 1447, 1449, 1450, - /* 470 */ 1445, 1482, 1484, 1496, 1464, 1491, 1342, 1461, 1467, 1501, - /* 480 */ 1318, 1508, 1507, 1471, 1514, 1344, 1476, 1511, 1513, 1515, - /* 490 */ 1517, 1518, 1525, 1476, 1557, 1409, 1520, 1493, 1497, 1499, - /* 500 */ 1531, 1502, 1503, 1532, 1556, 1410, 1505, 1510, 1516, 1506, - /* 510 */ 1521, 1436, 1522, 1588, 1555, 1453, 1524, 1504, 1551, 1552, - /* 520 */ 1526, 1519, 1527, 1576, 1528, 1523, 1529, 1558, 1562, 1535, - /* 530 */ 1530, 1569, 1537, 1540, 1571, 1548, 1550, 1585, 1553, 1554, - /* 540 */ 1589, 1559, 1533, 1534, 1536, 1538, 1621, 1560, 1564, 1565, - /* 550 */ 1599, 1567, 1561, 1601, 1629, 1602, 1606, 1603, 1587, 1625, - /* 560 */ 1623, 1624, 1628, 1630, 1633, 1650, 1634, 1641, 1612, 1426, - /* 570 */ 1644, 1428, 1645, 1646, 1647, 1648, 1649, 1652, 1697, 1653, - /* 580 */ 1657, 1667, 1705, 1659, 1662, 1673, 1711, 1666, 1669, 1678, - /* 590 */ 1716, 1670, 1676, 1681, 1722, 1679, 1683, 1725, 1727, 1708, - /* 600 */ 1719, 1715, 1726, 1729, 1721, + /* 0 */ 768, 0, 0, 48, 234, 234, 234, 234, 255, 255, + /* 10 */ 234, 234, 441, 462, 648, 462, 462, 462, 462, 462, + /* 20 */ 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, + /* 30 */ 462, 462, 462, 462, 462, 462, 462, 462, 254, 254, + /* 40 */ 54, 54, 54, 1188, 1188, 1188, 1188, 331, 508, 74, + /* 50 */ 4, 4, 7, 7, 76, 74, 74, 4, 4, 4, + /* 60 */ 4, 4, 4, 216, 4, 265, 371, 527, 265, 4, + /* 70 */ 4, 265, 4, 265, 265, 527, 265, 4, 447, 644, + /* 80 */ 14, 554, 554, 139, 192, 1453, 1453, 1453, 1453, 1453, + /* 90 */ 1453, 1453, 1453, 1453, 1453, 1453, 1453, 1453, 1453, 1453, + /* 100 */ 1453, 1453, 1453, 1453, 604, 229, 482, 482, 198, 5, + /* 110 */ 438, 438, 438, 764, 5, 772, 527, 265, 265, 527, + /* 120 */ 487, 468, 636, 636, 636, 636, 636, 636, 636, 440, + /* 130 */ 299, 222, 27, 486, 37, 346, 287, 456, 681, 674, + /* 140 */ 484, 769, 470, 652, 470, 390, 390, 390, 615, 773, + /* 150 */ 948, 963, 966, 864, 948, 948, 979, 886, 886, 948, + /* 160 */ 1014, 1014, 1020, 216, 527, 216, 1040, 1051, 216, 1040, + /* 170 */ 216, 772, 1066, 216, 216, 948, 216, 1014, 265, 265, + /* 180 */ 265, 265, 265, 265, 265, 265, 265, 265, 265, 948, + /* 190 */ 1014, 1043, 1020, 447, 967, 527, 447, 1040, 447, 772, + /* 200 
*/ 1066, 447, 1118, 924, 937, 1043, 924, 937, 1043, 1043, + /* 210 */ 265, 931, 938, 968, 970, 974, 772, 1159, 1145, 957, + /* 220 */ 971, 960, 957, 971, 957, 971, 1112, 937, 1043, 1043, + /* 230 */ 937, 1043, 1064, 772, 1066, 447, 487, 447, 772, 1146, + /* 240 */ 468, 948, 447, 1014, 2153, 2153, 2153, 2153, 2153, 2153, + /* 250 */ 2153, 2153, 71, 1425, 93, 453, 248, 587, 187, 63, + /* 260 */ 385, 391, 211, 570, 95, 95, 95, 95, 95, 95, + /* 270 */ 95, 95, 357, 2, 52, 492, 712, 655, 661, 661, + /* 280 */ 661, 661, 854, 501, 786, 788, 790, 792, 857, 863, + /* 290 */ 867, 862, 557, 677, 880, 881, 888, 900, 715, 749, + /* 300 */ 805, 891, 824, 892, 868, 893, 898, 928, 930, 951, + /* 310 */ 869, 879, 955, 956, 961, 965, 975, 976, 807, 242, + /* 320 */ 1261, 1262, 1192, 1265, 1227, 1107, 1237, 1239, 1240, 1114, + /* 330 */ 1288, 1245, 1246, 1119, 1294, 1249, 1297, 1251, 1300, 1254, + /* 340 */ 1304, 1224, 1144, 1147, 1193, 1151, 1312, 1314, 1161, 1163, + /* 350 */ 1318, 1319, 1278, 1329, 1332, 1333, 1334, 1336, 1337, 1338, + /* 360 */ 1339, 1340, 1341, 1342, 1343, 1344, 1347, 1348, 1349, 1350, + /* 370 */ 1351, 1324, 1354, 1366, 1369, 1371, 1372, 1373, 1352, 1375, + /* 380 */ 1376, 1377, 1382, 1383, 1385, 1346, 1359, 1356, 1381, 1355, + /* 390 */ 1384, 1360, 1402, 1365, 1368, 1407, 1408, 1409, 1379, 1410, + /* 400 */ 1412, 1380, 1413, 1361, 1419, 1421, 1378, 1389, 1386, 1422, + /* 410 */ 1394, 1398, 1390, 1426, 1400, 1411, 1392, 1430, 1414, 1415, + /* 420 */ 1396, 1457, 1459, 1462, 1463, 1391, 1395, 1418, 1447, 1467, + /* 430 */ 1427, 1435, 1437, 1439, 1431, 1432, 1440, 1449, 1450, 1498, + /* 440 */ 1477, 1501, 1481, 1456, 1506, 1485, 1464, 1509, 1488, 1513, + /* 450 */ 1492, 1495, 1518, 1374, 1472, 1521, 1362, 1500, 1399, 1387, + /* 460 */ 1524, 1526, 1527, 1448, 1491, 1393, 1490, 1494, 1330, 1460, + /* 470 */ 1505, 1465, 1451, 1468, 1469, 1466, 1511, 1510, 1514, 1474, + /* 480 */ 1512, 1363, 1475, 1478, 1517, 1357, 1522, 1520, 1480, 1533, + /* 490 */ 1364, 1497, 1528, 1529, 1534, 1535, 1536, 1537, 1497, 1578, + /* 500 */ 1405, 1545, 1507, 1523, 1530, 1543, 1525, 1532, 1544, 1570, + /* 510 */ 1420, 1538, 1540, 1542, 1539, 1546, 1461, 1547, 1603, 1572, + /* 520 */ 1473, 1549, 1519, 1564, 1574, 1551, 1553, 1555, 1593, 1558, + /* 530 */ 1531, 1567, 1569, 1579, 1559, 1568, 1605, 1573, 1575, 1606, + /* 540 */ 1577, 1580, 1608, 1582, 1583, 1609, 1585, 1556, 1565, 1581, + /* 550 */ 1584, 1638, 1576, 1599, 1601, 1617, 1602, 1586, 1637, 1663, + /* 560 */ 1566, 1629, 1643, 1627, 1613, 1652, 1647, 1648, 1649, 1650, + /* 570 */ 1662, 1677, 1664, 1665, 1646, 1431, 1666, 1432, 1667, 1670, + /* 580 */ 1671, 1672, 1673, 1674, 1722, 1676, 1681, 1690, 1728, 1683, + /* 590 */ 1686, 1695, 1733, 1688, 1692, 1702, 1740, 1694, 1697, 1706, + /* 600 */ 1744, 1698, 1701, 1748, 1757, 1745, 1747, 1749, 1751, 1753, + /* 610 */ 1746, }; -#define YY_REDUCE_COUNT (248) -#define YY_REDUCE_MIN (-335) -#define YY_REDUCE_MAX (1836) +#define YY_REDUCE_COUNT (251) +#define YY_REDUCE_MIN (-320) +#define YY_REDUCE_MAX (1796) static const short yy_reduce_ofst[] = { - /* 0 */ -190, -238, 489, 520, 308, 616, 679, 731, 790, 841, - /* 10 */ 858, 916, 948, -140, 1001, 1058, 138, 1105, 1144, 1195, - /* 20 */ 1224, 1256, 1312, 1343, 1372, 1421, 1432, 1483, 1512, 1544, - /* 30 */ 1600, 1631, 1660, 1691, 1720, 1771, 1782, 1836, 546, 971, - /* 40 */ 88, -1, 113, -266, -181, 46, 103, 239, -286, -85, - /* 50 */ -163, 87, -240, -223, -335, -334, -253, -49, 85, 135, - /* 60 */ 248, 331, 511, -172, 521, -124, -111, 56, 136, 564, - /* 70 */ 591, -21, 
613, 216, 130, 152, 320, 614, -173, -177, - /* 80 */ -256, -256, -256, -188, -105, -86, 213, 261, 287, 370, - /* 90 */ 375, 385, 422, 423, 424, 426, 427, 428, 429, 430, - /* 100 */ 431, 434, 482, 486, -100, 4, 305, 348, 63, 473, - /* 110 */ -83, 432, 455, 355, 476, 94, 492, 457, -8, 447, - /* 120 */ 543, -95, -270, 225, 235, 317, 369, 390, 593, 516, - /* 130 */ 654, 650, 578, 590, 639, 618, 683, 683, 707, 711, - /* 140 */ 684, 659, 629, 629, 629, 619, 626, 628, 664, 683, - /* 150 */ 758, 712, 761, 721, 775, 777, 741, 747, 757, 786, - /* 160 */ 806, 807, 752, 800, 771, 808, 768, 811, 788, 783, - /* 170 */ 821, 822, 825, 826, 835, 814, 815, 816, 817, 827, - /* 180 */ 837, 845, 846, 847, 848, 850, 842, 851, 804, 789, - /* 190 */ 870, 823, 836, 880, 834, 882, 853, 849, 885, 831, - /* 200 */ 798, 855, 868, 818, 871, 875, 888, 683, 819, 838, - /* 210 */ 852, 828, 629, 899, 887, 860, 839, 867, 843, 886, - /* 220 */ 873, 891, 893, 897, 907, 910, 911, 912, 915, 932, - /* 230 */ 945, 944, 983, 980, 986, 969, 976, 991, 1003, 1002, - /* 240 */ 1009, 955, 953, 973, 994, 1008, 1010, 1020, 1035, + /* 0 */ 259, -226, 236, 61, 577, 638, 671, 752, 784, 835, + /* 10 */ 29, 850, 901, 953, 736, 962, 1010, 1042, 1074, 1122, + /* 20 */ 1135, 1175, 1223, 1280, 1328, 1358, 1388, 1436, 1445, 1493, + /* 30 */ 1541, 1552, 1571, 1630, 1639, 1699, 1708, 1756, 1760, 1796, + /* 40 */ -20, 261, 269, -272, 207, 204, 298, 182, 449, 512, + /* 50 */ -244, -164, -246, -241, -320, -178, -76, -196, 55, 59, + /* 60 */ 132, 239, 419, -222, 422, -227, -256, -139, -1, 534, + /* 70 */ 535, 84, 537, 310, 102, 179, 448, 568, 343, -75, + /* 80 */ -315, -315, -315, -228, -49, 60, 90, 123, 194, 213, + /* 90 */ 219, 235, 317, 380, 472, 507, 559, 565, 566, 567, + /* 100 */ 569, 572, 576, 579, -154, -152, -216, -68, 33, 138, + /* 110 */ 75, 406, 414, -3, 176, -127, 250, 88, 439, 325, + /* 120 */ 202, 111, 302, 526, 555, 578, 583, 612, 631, 605, + /* 130 */ 663, 653, 571, 574, 628, 592, 666, 666, 689, 690, + /* 140 */ 662, 647, 635, 635, 635, 629, 637, 650, 665, 666, + /* 150 */ 751, 703, 757, 717, 771, 774, 735, 743, 745, 782, + /* 160 */ 791, 794, 737, 789, 759, 806, 777, 776, 827, 787, + /* 170 */ 830, 808, 798, 834, 836, 842, 840, 851, 832, 837, + /* 180 */ 838, 839, 841, 844, 845, 846, 847, 848, 849, 855, + /* 190 */ 866, 820, 817, 872, 822, 852, 874, 833, 878, 856, + /* 200 */ 843, 884, 859, 797, 853, 865, 810, 875, 882, 883, + /* 210 */ 666, 811, 816, 860, 870, 635, 905, 877, 876, 858, + /* 220 */ 871, 829, 873, 885, 889, 894, 903, 908, 902, 904, + /* 230 */ 910, 906, 917, 947, 919, 977, 964, 981, 958, 972, + /* 240 */ 982, 986, 985, 995, 943, 944, 959, 984, 989, 991, + /* 250 */ 1005, 1019, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 10 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 20 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 30 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 40 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 50 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 60 */ 1345, 1345, 1345, 1414, 1345, 1345, 1345, 1345, 1345, 1345, - /* 70 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1412, 1552, - /* 80 */ 1345, 1717, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 90 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 100 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1414, 1345, - /* 110 */ 1728, 1728, 1728, 
1412, 1345, 1345, 1345, 1345, 1345, 1345, - /* 120 */ 1507, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1590, - /* 130 */ 1345, 1345, 1794, 1345, 1596, 1752, 1345, 1345, 1345, 1345, - /* 140 */ 1460, 1744, 1720, 1734, 1721, 1779, 1779, 1779, 1737, 1345, - /* 150 */ 1345, 1345, 1345, 1582, 1345, 1345, 1557, 1554, 1554, 1345, - /* 160 */ 1345, 1345, 1345, 1414, 1345, 1414, 1345, 1414, 1345, 1345, - /* 170 */ 1414, 1414, 1345, 1414, 1345, 1345, 1345, 1345, 1345, 1345, - /* 180 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 190 */ 1412, 1592, 1345, 1412, 1345, 1412, 1345, 1345, 1412, 1345, - /* 200 */ 1759, 1757, 1345, 1759, 1757, 1345, 1345, 1345, 1771, 1767, - /* 210 */ 1750, 1748, 1734, 1345, 1345, 1345, 1785, 1781, 1797, 1785, - /* 220 */ 1781, 1785, 1781, 1345, 1757, 1345, 1345, 1757, 1345, 1565, - /* 230 */ 1345, 1345, 1412, 1345, 1412, 1345, 1476, 1345, 1345, 1412, - /* 240 */ 1345, 1584, 1598, 1574, 1510, 1510, 1510, 1415, 1350, 1345, - /* 250 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 260 */ 1472, 1661, 1770, 1769, 1693, 1692, 1691, 1689, 1660, 1345, - /* 270 */ 1345, 1345, 1345, 1345, 1345, 1654, 1655, 1653, 1652, 1345, - /* 280 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 290 */ 1345, 1345, 1345, 1345, 1718, 1345, 1782, 1786, 1345, 1345, - /* 300 */ 1345, 1638, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 310 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 320 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 330 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 340 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 350 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 360 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 370 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 380 */ 1345, 1345, 1345, 1345, 1379, 1345, 1345, 1345, 1345, 1345, - /* 390 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 400 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 410 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 420 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1441, - /* 430 */ 1440, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 440 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 450 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 460 */ 1345, 1741, 1751, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 470 */ 1345, 1345, 1345, 1638, 1345, 1768, 1345, 1727, 1723, 1345, - /* 480 */ 1345, 1719, 1345, 1345, 1780, 1345, 1345, 1345, 1345, 1345, - /* 490 */ 1345, 1345, 1345, 1345, 1713, 1345, 1686, 1345, 1345, 1345, - /* 500 */ 1345, 1345, 1345, 1345, 1345, 1648, 1345, 1345, 1345, 1345, - /* 510 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1637, 1345, - /* 520 */ 1677, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1504, - /* 530 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 540 */ 1345, 1345, 1489, 1487, 1486, 1485, 1345, 1482, 1345, 1345, - /* 550 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1434, - /* 560 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1425, - /* 570 */ 1345, 1424, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 580 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 590 */ 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, 1345, - /* 600 */ 1345, 1345, 1345, 1345, 1345, + /* 0 */ 
1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 10 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 20 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 30 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 40 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 50 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 60 */ 1356, 1356, 1356, 1425, 1356, 1356, 1356, 1356, 1356, 1356, + /* 70 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1423, 1564, + /* 80 */ 1356, 1731, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 90 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 100 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1425, 1356, + /* 110 */ 1742, 1742, 1742, 1423, 1356, 1356, 1356, 1356, 1356, 1356, + /* 120 */ 1519, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1603, + /* 130 */ 1356, 1356, 1808, 1356, 1609, 1766, 1356, 1356, 1356, 1356, + /* 140 */ 1472, 1758, 1734, 1748, 1735, 1793, 1793, 1793, 1751, 1356, + /* 150 */ 1356, 1356, 1356, 1595, 1356, 1356, 1569, 1566, 1566, 1356, + /* 160 */ 1356, 1356, 1356, 1425, 1356, 1425, 1356, 1356, 1425, 1356, + /* 170 */ 1425, 1356, 1356, 1425, 1425, 1356, 1425, 1356, 1356, 1356, + /* 180 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 190 */ 1356, 1356, 1356, 1423, 1605, 1356, 1423, 1356, 1423, 1356, + /* 200 */ 1356, 1423, 1356, 1773, 1771, 1356, 1773, 1771, 1356, 1356, + /* 210 */ 1356, 1785, 1781, 1764, 1762, 1748, 1356, 1356, 1356, 1799, + /* 220 */ 1795, 1811, 1799, 1795, 1799, 1795, 1356, 1771, 1356, 1356, + /* 230 */ 1771, 1356, 1577, 1356, 1356, 1423, 1356, 1423, 1356, 1488, + /* 240 */ 1356, 1356, 1423, 1356, 1597, 1611, 1587, 1522, 1522, 1522, + /* 250 */ 1426, 1361, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 260 */ 1356, 1356, 1356, 1484, 1675, 1784, 1783, 1707, 1706, 1705, + /* 270 */ 1703, 1674, 1356, 1356, 1356, 1356, 1356, 1356, 1668, 1669, + /* 280 */ 1667, 1666, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 290 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1732, 1356, 1796, + /* 300 */ 1800, 1356, 1356, 1356, 1651, 1356, 1356, 1356, 1356, 1356, + /* 310 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 320 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 330 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 340 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 350 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 360 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 370 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 380 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1390, 1356, 1356, + /* 390 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 400 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 410 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 420 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 430 */ 1356, 1356, 1356, 1356, 1453, 1452, 1356, 1356, 1356, 1356, + /* 440 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 450 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, + /* 460 */ 1356, 1356, 1356, 1356, 1356, 1356, 1755, 1765, 1356, 1356, + /* 470 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1651, 1356, + /* 480 */ 1782, 1356, 1741, 1737, 1356, 1356, 1733, 1356, 1356, 1794, + /* 490 */ 1356, 1356, 1356, 1356, 1356, 1356, 
1356, 1356, 1356, 1727,
+ /* 500 */ 1356, 1700, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356,
+ /* 510 */ 1662, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356,
+ /* 520 */ 1356, 1356, 1356, 1650, 1356, 1691, 1356, 1356, 1356, 1356,
+ /* 530 */ 1356, 1356, 1356, 1356, 1516, 1356, 1356, 1356, 1356, 1356,
+ /* 540 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1501, 1499, 1498,
+ /* 550 */ 1497, 1356, 1494, 1356, 1356, 1356, 1356, 1356, 1356, 1356,
+ /* 560 */ 1356, 1356, 1356, 1356, 1356, 1445, 1356, 1356, 1356, 1356,
+ /* 570 */ 1356, 1356, 1356, 1356, 1356, 1436, 1356, 1435, 1356, 1356,
+ /* 580 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356,
+ /* 590 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356,
+ /* 600 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356,
+ /* 610 */ 1356,
 };
 /********** End of lemon-generated parsing tables *****************************/
@@ -903,6 +906,7 @@ static const YYCODETYPE yyFallback[] = {
 0, /* VGROUPS => nothing */
 0, /* SINGLE_STABLE => nothing */
 0, /* RETENTIONS => nothing */
+ 0, /* SCHEMALESS => nothing */
 0, /* NK_COLON => nothing */
 0, /* TABLE => nothing */
 0, /* NK_LP => nothing */
@@ -971,6 +975,7 @@ static const YYCODETYPE yyFallback[] = {
 0, /* INTERVAL => nothing */
 0, /* TOPIC => nothing */
 0, /* AS => nothing */
+ 0, /* CGROUP => nothing */
 0, /* WITH => nothing */
 0, /* SCHEMA => nothing */
 0, /* DESC => nothing */
@@ -1057,12 +1062,12 @@ static const YYCODETYPE yyFallback[] = {
 0, /* ASC => nothing */
 0, /* NULLS => nothing */
 0, /* ID => nothing */
- 231, /* NK_BITNOT => ID */
- 231, /* INSERT => ID */
- 231, /* VALUES => ID */
- 231, /* IMPORT => ID */
- 231, /* NK_SEMI => ID */
- 231, /* FILE => ID */
+ 233, /* NK_BITNOT => ID */
+ 233, /* INSERT => ID */
+ 233, /* VALUES => ID */
+ 233, /* IMPORT => ID */
+ 233, /* NK_SEMI => ID */
+ 233, /* FILE => ID */
 };
 #endif /* YYFALLBACK */
@@ -1228,286 +1233,289 @@ static const char *const yyTokenName[] = {
 /* 75 */ "VGROUPS",
 /* 76 */ "SINGLE_STABLE",
 /* 77 */ "RETENTIONS",
- /* 78 */ "NK_COLON",
- /* 79 */ "TABLE",
- /* 80 */ "NK_LP",
- /* 81 */ "NK_RP",
- /* 82 */ "STABLE",
- /* 83 */ "ADD",
- /* 84 */ "COLUMN",
- /* 85 */ "MODIFY",
- /* 86 */ "RENAME",
- /* 87 */ "TAG",
- /* 88 */ "SET",
- /* 89 */ "NK_EQ",
- /* 90 */ "USING",
- /* 91 */ "TAGS",
- /* 92 */ "COMMENT",
- /* 93 */ "BOOL",
- /* 94 */ "TINYINT",
- /* 95 */ "SMALLINT",
- /* 96 */ "INT",
- /* 97 */ "INTEGER",
- /* 98 */ "BIGINT",
- /* 99 */ "FLOAT",
- /* 100 */ "DOUBLE",
- /* 101 */ "BINARY",
- /* 102 */ "TIMESTAMP",
- /* 103 */ "NCHAR",
- /* 104 */ "UNSIGNED",
- /* 105 */ "JSON",
- /* 106 */ "VARCHAR",
- /* 107 */ "MEDIUMBLOB",
- /* 108 */ "BLOB",
- /* 109 */ "VARBINARY",
- /* 110 */ "DECIMAL",
- /* 111 */ "DELAY",
- /* 112 */ "FILE_FACTOR",
- /* 113 */ "NK_FLOAT",
- /* 114 */ "ROLLUP",
- /* 115 */ "TTL",
- /* 116 */ "SMA",
- /* 117 */ "SHOW",
- /* 118 */ "DATABASES",
- /* 119 */ "TABLES",
- /* 120 */ "STABLES",
- /* 121 */ "MNODES",
- /* 122 */ "MODULES",
- /* 123 */ "QNODES",
- /* 124 */ "FUNCTIONS",
- /* 125 */ "INDEXES",
- /* 126 */ "ACCOUNTS",
- /* 127 */ "APPS",
- /* 128 */ "CONNECTIONS",
- /* 129 */ "LICENCE",
- /* 130 */ "GRANTS",
- /* 131 */ "QUERIES",
- /* 132 */ "SCORES",
- /* 133 */ "TOPICS",
- /* 134 */ "VARIABLES",
- /* 135 */ "BNODES",
- /* 136 */ "SNODES",
- /* 137 */ "CLUSTER",
- /* 138 */ "TRANSACTIONS",
- /* 139 */ "LIKE",
- /* 140 */ "INDEX",
- /* 141 */ "FULLTEXT",
- /* 142 */ "FUNCTION",
- /* 143 */ "INTERVAL",
- /* 144 */ "TOPIC",
- /* 145 */ "AS",
- /* 146 */ "WITH",
- /* 147 */ "SCHEMA",
- /* 148 */ "DESC",
- /* 149 */ "DESCRIBE",
- /* 150 */ "RESET",
- /* 151 */ "QUERY",
- /* 152 */ "CACHE",
- /* 153 */ "EXPLAIN",
- /* 154 */ "ANALYZE",
- /* 155 */ "VERBOSE",
- /* 156 */ "NK_BOOL",
- /* 157 */ "RATIO",
- /* 158 */ "COMPACT",
- /* 159 */ "VNODES",
- /* 160 */ "IN",
- /* 161 */ "OUTPUTTYPE",
- /* 162 */ "AGGREGATE",
- /* 163 */ "BUFSIZE",
- /* 164 */ "STREAM",
- /* 165 */ "INTO",
- /* 166 */ "TRIGGER",
- /* 167 */ "AT_ONCE",
- /* 168 */ "WINDOW_CLOSE",
- /* 169 */ "WATERMARK",
- /* 170 */ "KILL",
- /* 171 */ "CONNECTION",
- /* 172 */ "TRANSACTION",
- /* 173 */ "MERGE",
- /* 174 */ "VGROUP",
- /* 175 */ "REDISTRIBUTE",
- /* 176 */ "SPLIT",
- /* 177 */ "SYNCDB",
- /* 178 */ "NULL",
- /* 179 */ "NK_QUESTION",
- /* 180 */ "NK_ARROW",
- /* 181 */ "ROWTS",
- /* 182 */ "TBNAME",
- /* 183 */ "QSTARTTS",
- /* 184 */ "QENDTS",
- /* 185 */ "WSTARTTS",
- /* 186 */ "WENDTS",
- /* 187 */ "WDURATION",
- /* 188 */ "CAST",
- /* 189 */ "NOW",
- /* 190 */ "TODAY",
- /* 191 */ "TIMEZONE",
- /* 192 */ "COUNT",
- /* 193 */ "FIRST",
- /* 194 */ "LAST",
- /* 195 */ "LAST_ROW",
- /* 196 */ "BETWEEN",
- /* 197 */ "IS",
- /* 198 */ "NK_LT",
- /* 199 */ "NK_GT",
- /* 200 */ "NK_LE",
- /* 201 */ "NK_GE",
- /* 202 */ "NK_NE",
- /* 203 */ "MATCH",
- /* 204 */ "NMATCH",
- /* 205 */ "CONTAINS",
- /* 206 */ "JOIN",
- /* 207 */ "INNER",
- /* 208 */ "SELECT",
- /* 209 */ "DISTINCT",
- /* 210 */ "WHERE",
- /* 211 */ "PARTITION",
- /* 212 */ "BY",
- /* 213 */ "SESSION",
- /* 214 */ "STATE_WINDOW",
- /* 215 */ "SLIDING",
- /* 216 */ "FILL",
- /* 217 */ "VALUE",
- /* 218 */ "NONE",
- /* 219 */ "PREV",
- /* 220 */ "LINEAR",
- /* 221 */ "NEXT",
- /* 222 */ "GROUP",
- /* 223 */ "HAVING",
- /* 224 */ "ORDER",
- /* 225 */ "SLIMIT",
- /* 226 */ "SOFFSET",
- /* 227 */ "LIMIT",
- /* 228 */ "OFFSET",
- /* 229 */ "ASC",
- /* 230 */ "NULLS",
- /* 231 */ "ID",
- /* 232 */ "NK_BITNOT",
- /* 233 */ "INSERT",
- /* 234 */ "VALUES",
- /* 235 */ "IMPORT",
- /* 236 */ "NK_SEMI",
- /* 237 */ "FILE",
- /* 238 */ "cmd",
- /* 239 */ "account_options",
- /* 240 */ "alter_account_options",
- /* 241 */ "literal",
- /* 242 */ "alter_account_option",
- /* 243 */ "user_name",
- /* 244 */ "privileges",
- /* 245 */ "priv_level",
- /* 246 */ "priv_type_list",
- /* 247 */ "priv_type",
- /* 248 */ "db_name",
- /* 249 */ "dnode_endpoint",
- /* 250 */ "dnode_host_name",
- /* 251 */ "not_exists_opt",
- /* 252 */ "db_options",
- /* 253 */ "exists_opt",
- /* 254 */ "alter_db_options",
- /* 255 */ "integer_list",
- /* 256 */ "variable_list",
- /* 257 */ "retention_list",
- /* 258 */ "alter_db_option",
- /* 259 */ "retention",
- /* 260 */ "full_table_name",
- /* 261 */ "column_def_list",
- /* 262 */ "tags_def_opt",
- /* 263 */ "table_options",
- /* 264 */ "multi_create_clause",
- /* 265 */ "tags_def",
- /* 266 */ "multi_drop_clause",
- /* 267 */ "alter_table_clause",
- /* 268 */ "alter_table_options",
- /* 269 */ "column_name",
- /* 270 */ "type_name",
- /* 271 */ "signed_literal",
- /* 272 */ "create_subtable_clause",
- /* 273 */ "specific_tags_opt",
- /* 274 */ "literal_list",
- /* 275 */ "drop_table_clause",
- /* 276 */ "col_name_list",
- /* 277 */ "table_name",
- /* 278 */ "column_def",
- /* 279 */ "func_name_list",
- /* 280 */ "alter_table_option",
- /* 281 */ "col_name",
- /* 282 */ "db_name_cond_opt",
- /* 283 */ "like_pattern_opt",
- /* 284 */ "table_name_cond",
- /* 285 */ "from_db_opt",
- /* 286 */ "func_name",
- /* 287 */ "function_name",
- /* 288 */ "index_name",
- /* 289 */ "index_options",
- /* 290 */ "func_list",
"func_list", - /* 291 */ "duration_literal", - /* 292 */ "sliding_opt", - /* 293 */ "func", - /* 294 */ "expression_list", - /* 295 */ "topic_name", - /* 296 */ "topic_options", - /* 297 */ "query_expression", - /* 298 */ "analyze_opt", - /* 299 */ "explain_options", - /* 300 */ "agg_func_opt", - /* 301 */ "bufsize_opt", - /* 302 */ "stream_name", - /* 303 */ "stream_options", - /* 304 */ "into_opt", - /* 305 */ "dnode_list", - /* 306 */ "signed", - /* 307 */ "literal_func", - /* 308 */ "table_alias", - /* 309 */ "column_alias", - /* 310 */ "expression", - /* 311 */ "pseudo_column", - /* 312 */ "column_reference", - /* 313 */ "function_expression", - /* 314 */ "subquery", - /* 315 */ "star_func", - /* 316 */ "star_func_para_list", - /* 317 */ "noarg_func", - /* 318 */ "other_para_list", - /* 319 */ "star_func_para", - /* 320 */ "predicate", - /* 321 */ "compare_op", - /* 322 */ "in_op", - /* 323 */ "in_predicate_value", - /* 324 */ "boolean_value_expression", - /* 325 */ "boolean_primary", - /* 326 */ "common_expression", - /* 327 */ "from_clause", - /* 328 */ "table_reference_list", - /* 329 */ "table_reference", - /* 330 */ "table_primary", - /* 331 */ "joined_table", - /* 332 */ "alias_opt", - /* 333 */ "parenthesized_joined_table", - /* 334 */ "join_type", - /* 335 */ "search_condition", - /* 336 */ "query_specification", - /* 337 */ "set_quantifier_opt", - /* 338 */ "select_list", - /* 339 */ "where_clause_opt", - /* 340 */ "partition_by_clause_opt", - /* 341 */ "twindow_clause_opt", - /* 342 */ "group_by_clause_opt", - /* 343 */ "having_clause_opt", - /* 344 */ "select_sublist", - /* 345 */ "select_item", - /* 346 */ "fill_opt", - /* 347 */ "fill_mode", - /* 348 */ "group_by_list", - /* 349 */ "query_expression_body", - /* 350 */ "order_by_clause_opt", - /* 351 */ "slimit_clause_opt", - /* 352 */ "limit_clause_opt", - /* 353 */ "query_primary", - /* 354 */ "sort_specification_list", - /* 355 */ "sort_specification", - /* 356 */ "ordering_specification_opt", - /* 357 */ "null_ordering_opt", + /* 78 */ "SCHEMALESS", + /* 79 */ "NK_COLON", + /* 80 */ "TABLE", + /* 81 */ "NK_LP", + /* 82 */ "NK_RP", + /* 83 */ "STABLE", + /* 84 */ "ADD", + /* 85 */ "COLUMN", + /* 86 */ "MODIFY", + /* 87 */ "RENAME", + /* 88 */ "TAG", + /* 89 */ "SET", + /* 90 */ "NK_EQ", + /* 91 */ "USING", + /* 92 */ "TAGS", + /* 93 */ "COMMENT", + /* 94 */ "BOOL", + /* 95 */ "TINYINT", + /* 96 */ "SMALLINT", + /* 97 */ "INT", + /* 98 */ "INTEGER", + /* 99 */ "BIGINT", + /* 100 */ "FLOAT", + /* 101 */ "DOUBLE", + /* 102 */ "BINARY", + /* 103 */ "TIMESTAMP", + /* 104 */ "NCHAR", + /* 105 */ "UNSIGNED", + /* 106 */ "JSON", + /* 107 */ "VARCHAR", + /* 108 */ "MEDIUMBLOB", + /* 109 */ "BLOB", + /* 110 */ "VARBINARY", + /* 111 */ "DECIMAL", + /* 112 */ "DELAY", + /* 113 */ "FILE_FACTOR", + /* 114 */ "NK_FLOAT", + /* 115 */ "ROLLUP", + /* 116 */ "TTL", + /* 117 */ "SMA", + /* 118 */ "SHOW", + /* 119 */ "DATABASES", + /* 120 */ "TABLES", + /* 121 */ "STABLES", + /* 122 */ "MNODES", + /* 123 */ "MODULES", + /* 124 */ "QNODES", + /* 125 */ "FUNCTIONS", + /* 126 */ "INDEXES", + /* 127 */ "ACCOUNTS", + /* 128 */ "APPS", + /* 129 */ "CONNECTIONS", + /* 130 */ "LICENCE", + /* 131 */ "GRANTS", + /* 132 */ "QUERIES", + /* 133 */ "SCORES", + /* 134 */ "TOPICS", + /* 135 */ "VARIABLES", + /* 136 */ "BNODES", + /* 137 */ "SNODES", + /* 138 */ "CLUSTER", + /* 139 */ "TRANSACTIONS", + /* 140 */ "LIKE", + /* 141 */ "INDEX", + /* 142 */ "FULLTEXT", + /* 143 */ "FUNCTION", + /* 144 */ "INTERVAL", + /* 145 */ "TOPIC", + /* 146 */ "AS", + /* 
147 */ "CGROUP", + /* 148 */ "WITH", + /* 149 */ "SCHEMA", + /* 150 */ "DESC", + /* 151 */ "DESCRIBE", + /* 152 */ "RESET", + /* 153 */ "QUERY", + /* 154 */ "CACHE", + /* 155 */ "EXPLAIN", + /* 156 */ "ANALYZE", + /* 157 */ "VERBOSE", + /* 158 */ "NK_BOOL", + /* 159 */ "RATIO", + /* 160 */ "COMPACT", + /* 161 */ "VNODES", + /* 162 */ "IN", + /* 163 */ "OUTPUTTYPE", + /* 164 */ "AGGREGATE", + /* 165 */ "BUFSIZE", + /* 166 */ "STREAM", + /* 167 */ "INTO", + /* 168 */ "TRIGGER", + /* 169 */ "AT_ONCE", + /* 170 */ "WINDOW_CLOSE", + /* 171 */ "WATERMARK", + /* 172 */ "KILL", + /* 173 */ "CONNECTION", + /* 174 */ "TRANSACTION", + /* 175 */ "MERGE", + /* 176 */ "VGROUP", + /* 177 */ "REDISTRIBUTE", + /* 178 */ "SPLIT", + /* 179 */ "SYNCDB", + /* 180 */ "NULL", + /* 181 */ "NK_QUESTION", + /* 182 */ "NK_ARROW", + /* 183 */ "ROWTS", + /* 184 */ "TBNAME", + /* 185 */ "QSTARTTS", + /* 186 */ "QENDTS", + /* 187 */ "WSTARTTS", + /* 188 */ "WENDTS", + /* 189 */ "WDURATION", + /* 190 */ "CAST", + /* 191 */ "NOW", + /* 192 */ "TODAY", + /* 193 */ "TIMEZONE", + /* 194 */ "COUNT", + /* 195 */ "FIRST", + /* 196 */ "LAST", + /* 197 */ "LAST_ROW", + /* 198 */ "BETWEEN", + /* 199 */ "IS", + /* 200 */ "NK_LT", + /* 201 */ "NK_GT", + /* 202 */ "NK_LE", + /* 203 */ "NK_GE", + /* 204 */ "NK_NE", + /* 205 */ "MATCH", + /* 206 */ "NMATCH", + /* 207 */ "CONTAINS", + /* 208 */ "JOIN", + /* 209 */ "INNER", + /* 210 */ "SELECT", + /* 211 */ "DISTINCT", + /* 212 */ "WHERE", + /* 213 */ "PARTITION", + /* 214 */ "BY", + /* 215 */ "SESSION", + /* 216 */ "STATE_WINDOW", + /* 217 */ "SLIDING", + /* 218 */ "FILL", + /* 219 */ "VALUE", + /* 220 */ "NONE", + /* 221 */ "PREV", + /* 222 */ "LINEAR", + /* 223 */ "NEXT", + /* 224 */ "GROUP", + /* 225 */ "HAVING", + /* 226 */ "ORDER", + /* 227 */ "SLIMIT", + /* 228 */ "SOFFSET", + /* 229 */ "LIMIT", + /* 230 */ "OFFSET", + /* 231 */ "ASC", + /* 232 */ "NULLS", + /* 233 */ "ID", + /* 234 */ "NK_BITNOT", + /* 235 */ "INSERT", + /* 236 */ "VALUES", + /* 237 */ "IMPORT", + /* 238 */ "NK_SEMI", + /* 239 */ "FILE", + /* 240 */ "cmd", + /* 241 */ "account_options", + /* 242 */ "alter_account_options", + /* 243 */ "literal", + /* 244 */ "alter_account_option", + /* 245 */ "user_name", + /* 246 */ "privileges", + /* 247 */ "priv_level", + /* 248 */ "priv_type_list", + /* 249 */ "priv_type", + /* 250 */ "db_name", + /* 251 */ "dnode_endpoint", + /* 252 */ "dnode_host_name", + /* 253 */ "not_exists_opt", + /* 254 */ "db_options", + /* 255 */ "exists_opt", + /* 256 */ "alter_db_options", + /* 257 */ "integer_list", + /* 258 */ "variable_list", + /* 259 */ "retention_list", + /* 260 */ "alter_db_option", + /* 261 */ "retention", + /* 262 */ "full_table_name", + /* 263 */ "column_def_list", + /* 264 */ "tags_def_opt", + /* 265 */ "table_options", + /* 266 */ "multi_create_clause", + /* 267 */ "tags_def", + /* 268 */ "multi_drop_clause", + /* 269 */ "alter_table_clause", + /* 270 */ "alter_table_options", + /* 271 */ "column_name", + /* 272 */ "type_name", + /* 273 */ "signed_literal", + /* 274 */ "create_subtable_clause", + /* 275 */ "specific_tags_opt", + /* 276 */ "literal_list", + /* 277 */ "drop_table_clause", + /* 278 */ "col_name_list", + /* 279 */ "table_name", + /* 280 */ "column_def", + /* 281 */ "func_name_list", + /* 282 */ "alter_table_option", + /* 283 */ "col_name", + /* 284 */ "db_name_cond_opt", + /* 285 */ "like_pattern_opt", + /* 286 */ "table_name_cond", + /* 287 */ "from_db_opt", + /* 288 */ "func_name", + /* 289 */ "function_name", + /* 290 */ "index_name", + /* 291 */ 
"index_options", + /* 292 */ "func_list", + /* 293 */ "duration_literal", + /* 294 */ "sliding_opt", + /* 295 */ "func", + /* 296 */ "expression_list", + /* 297 */ "topic_name", + /* 298 */ "topic_options", + /* 299 */ "query_expression", + /* 300 */ "cgroup_name", + /* 301 */ "analyze_opt", + /* 302 */ "explain_options", + /* 303 */ "agg_func_opt", + /* 304 */ "bufsize_opt", + /* 305 */ "stream_name", + /* 306 */ "stream_options", + /* 307 */ "into_opt", + /* 308 */ "dnode_list", + /* 309 */ "signed", + /* 310 */ "literal_func", + /* 311 */ "table_alias", + /* 312 */ "column_alias", + /* 313 */ "expression", + /* 314 */ "pseudo_column", + /* 315 */ "column_reference", + /* 316 */ "function_expression", + /* 317 */ "subquery", + /* 318 */ "star_func", + /* 319 */ "star_func_para_list", + /* 320 */ "noarg_func", + /* 321 */ "other_para_list", + /* 322 */ "star_func_para", + /* 323 */ "predicate", + /* 324 */ "compare_op", + /* 325 */ "in_op", + /* 326 */ "in_predicate_value", + /* 327 */ "boolean_value_expression", + /* 328 */ "boolean_primary", + /* 329 */ "common_expression", + /* 330 */ "from_clause", + /* 331 */ "table_reference_list", + /* 332 */ "table_reference", + /* 333 */ "table_primary", + /* 334 */ "joined_table", + /* 335 */ "alias_opt", + /* 336 */ "parenthesized_joined_table", + /* 337 */ "join_type", + /* 338 */ "search_condition", + /* 339 */ "query_specification", + /* 340 */ "set_quantifier_opt", + /* 341 */ "select_list", + /* 342 */ "where_clause_opt", + /* 343 */ "partition_by_clause_opt", + /* 344 */ "twindow_clause_opt", + /* 345 */ "group_by_clause_opt", + /* 346 */ "having_clause_opt", + /* 347 */ "select_sublist", + /* 348 */ "select_item", + /* 349 */ "fill_opt", + /* 350 */ "fill_mode", + /* 351 */ "group_by_list", + /* 352 */ "query_expression_body", + /* 353 */ "order_by_clause_opt", + /* 354 */ "slimit_clause_opt", + /* 355 */ "limit_clause_opt", + /* 356 */ "query_primary", + /* 357 */ "sort_specification_list", + /* 358 */ "sort_specification", + /* 359 */ "ordering_specification_opt", + /* 360 */ "null_ordering_opt", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -1602,371 +1610,374 @@ static const char *const yyRuleName[] = { /* 84 */ "db_options ::= db_options VGROUPS NK_INTEGER", /* 85 */ "db_options ::= db_options SINGLE_STABLE NK_INTEGER", /* 86 */ "db_options ::= db_options RETENTIONS retention_list", - /* 87 */ "alter_db_options ::= alter_db_option", - /* 88 */ "alter_db_options ::= alter_db_options alter_db_option", - /* 89 */ "alter_db_option ::= BUFFER NK_INTEGER", - /* 90 */ "alter_db_option ::= CACHELAST NK_INTEGER", - /* 91 */ "alter_db_option ::= FSYNC NK_INTEGER", - /* 92 */ "alter_db_option ::= KEEP integer_list", - /* 93 */ "alter_db_option ::= KEEP variable_list", - /* 94 */ "alter_db_option ::= PAGES NK_INTEGER", - /* 95 */ "alter_db_option ::= REPLICA NK_INTEGER", - /* 96 */ "alter_db_option ::= STRICT NK_INTEGER", - /* 97 */ "alter_db_option ::= WAL NK_INTEGER", - /* 98 */ "integer_list ::= NK_INTEGER", - /* 99 */ "integer_list ::= integer_list NK_COMMA NK_INTEGER", - /* 100 */ "variable_list ::= NK_VARIABLE", - /* 101 */ "variable_list ::= variable_list NK_COMMA NK_VARIABLE", - /* 102 */ "retention_list ::= retention", - /* 103 */ "retention_list ::= retention_list NK_COMMA retention", - /* 104 */ "retention ::= NK_VARIABLE NK_COLON NK_VARIABLE", - /* 105 */ "cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options", - /* 106 */ "cmd ::= CREATE TABLE multi_create_clause", - 
/* 107 */ "cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options", - /* 108 */ "cmd ::= DROP TABLE multi_drop_clause", - /* 109 */ "cmd ::= DROP STABLE exists_opt full_table_name", - /* 110 */ "cmd ::= ALTER TABLE alter_table_clause", - /* 111 */ "cmd ::= ALTER STABLE alter_table_clause", - /* 112 */ "alter_table_clause ::= full_table_name alter_table_options", - /* 113 */ "alter_table_clause ::= full_table_name ADD COLUMN column_name type_name", - /* 114 */ "alter_table_clause ::= full_table_name DROP COLUMN column_name", - /* 115 */ "alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name", - /* 116 */ "alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name", - /* 117 */ "alter_table_clause ::= full_table_name ADD TAG column_name type_name", - /* 118 */ "alter_table_clause ::= full_table_name DROP TAG column_name", - /* 119 */ "alter_table_clause ::= full_table_name MODIFY TAG column_name type_name", - /* 120 */ "alter_table_clause ::= full_table_name RENAME TAG column_name column_name", - /* 121 */ "alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal", - /* 122 */ "multi_create_clause ::= create_subtable_clause", - /* 123 */ "multi_create_clause ::= multi_create_clause create_subtable_clause", - /* 124 */ "create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_tags_opt TAGS NK_LP literal_list NK_RP table_options", - /* 125 */ "multi_drop_clause ::= drop_table_clause", - /* 126 */ "multi_drop_clause ::= multi_drop_clause drop_table_clause", - /* 127 */ "drop_table_clause ::= exists_opt full_table_name", - /* 128 */ "specific_tags_opt ::=", - /* 129 */ "specific_tags_opt ::= NK_LP col_name_list NK_RP", - /* 130 */ "full_table_name ::= table_name", - /* 131 */ "full_table_name ::= db_name NK_DOT table_name", - /* 132 */ "column_def_list ::= column_def", - /* 133 */ "column_def_list ::= column_def_list NK_COMMA column_def", - /* 134 */ "column_def ::= column_name type_name", - /* 135 */ "column_def ::= column_name type_name COMMENT NK_STRING", - /* 136 */ "type_name ::= BOOL", - /* 137 */ "type_name ::= TINYINT", - /* 138 */ "type_name ::= SMALLINT", - /* 139 */ "type_name ::= INT", - /* 140 */ "type_name ::= INTEGER", - /* 141 */ "type_name ::= BIGINT", - /* 142 */ "type_name ::= FLOAT", - /* 143 */ "type_name ::= DOUBLE", - /* 144 */ "type_name ::= BINARY NK_LP NK_INTEGER NK_RP", - /* 145 */ "type_name ::= TIMESTAMP", - /* 146 */ "type_name ::= NCHAR NK_LP NK_INTEGER NK_RP", - /* 147 */ "type_name ::= TINYINT UNSIGNED", - /* 148 */ "type_name ::= SMALLINT UNSIGNED", - /* 149 */ "type_name ::= INT UNSIGNED", - /* 150 */ "type_name ::= BIGINT UNSIGNED", - /* 151 */ "type_name ::= JSON", - /* 152 */ "type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP", - /* 153 */ "type_name ::= MEDIUMBLOB", - /* 154 */ "type_name ::= BLOB", - /* 155 */ "type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP", - /* 156 */ "type_name ::= DECIMAL", - /* 157 */ "type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP", - /* 158 */ "type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP", - /* 159 */ "tags_def_opt ::=", - /* 160 */ "tags_def_opt ::= tags_def", - /* 161 */ "tags_def ::= TAGS NK_LP column_def_list NK_RP", - /* 162 */ "table_options ::=", - /* 163 */ "table_options ::= table_options COMMENT NK_STRING", - /* 164 */ "table_options ::= table_options DELAY NK_INTEGER", - /* 165 */ "table_options ::= table_options FILE_FACTOR NK_FLOAT", - /* 166 */ "table_options ::= 
table_options ROLLUP NK_LP func_name_list NK_RP", - /* 167 */ "table_options ::= table_options TTL NK_INTEGER", - /* 168 */ "table_options ::= table_options SMA NK_LP col_name_list NK_RP", - /* 169 */ "alter_table_options ::= alter_table_option", - /* 170 */ "alter_table_options ::= alter_table_options alter_table_option", - /* 171 */ "alter_table_option ::= COMMENT NK_STRING", - /* 172 */ "alter_table_option ::= TTL NK_INTEGER", - /* 173 */ "col_name_list ::= col_name", - /* 174 */ "col_name_list ::= col_name_list NK_COMMA col_name", - /* 175 */ "col_name ::= column_name", - /* 176 */ "cmd ::= SHOW DNODES", - /* 177 */ "cmd ::= SHOW USERS", - /* 178 */ "cmd ::= SHOW DATABASES", - /* 179 */ "cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt", - /* 180 */ "cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt", - /* 181 */ "cmd ::= SHOW db_name_cond_opt VGROUPS", - /* 182 */ "cmd ::= SHOW MNODES", - /* 183 */ "cmd ::= SHOW MODULES", - /* 184 */ "cmd ::= SHOW QNODES", - /* 185 */ "cmd ::= SHOW FUNCTIONS", - /* 186 */ "cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt", - /* 187 */ "cmd ::= SHOW STREAMS", - /* 188 */ "cmd ::= SHOW ACCOUNTS", - /* 189 */ "cmd ::= SHOW APPS", - /* 190 */ "cmd ::= SHOW CONNECTIONS", - /* 191 */ "cmd ::= SHOW LICENCE", - /* 192 */ "cmd ::= SHOW GRANTS", - /* 193 */ "cmd ::= SHOW CREATE DATABASE db_name", - /* 194 */ "cmd ::= SHOW CREATE TABLE full_table_name", - /* 195 */ "cmd ::= SHOW CREATE STABLE full_table_name", - /* 196 */ "cmd ::= SHOW QUERIES", - /* 197 */ "cmd ::= SHOW SCORES", - /* 198 */ "cmd ::= SHOW TOPICS", - /* 199 */ "cmd ::= SHOW VARIABLES", - /* 200 */ "cmd ::= SHOW BNODES", - /* 201 */ "cmd ::= SHOW SNODES", - /* 202 */ "cmd ::= SHOW CLUSTER", - /* 203 */ "cmd ::= SHOW TRANSACTIONS", - /* 204 */ "db_name_cond_opt ::=", - /* 205 */ "db_name_cond_opt ::= db_name NK_DOT", - /* 206 */ "like_pattern_opt ::=", - /* 207 */ "like_pattern_opt ::= LIKE NK_STRING", - /* 208 */ "table_name_cond ::= table_name", - /* 209 */ "from_db_opt ::=", - /* 210 */ "from_db_opt ::= FROM db_name", - /* 211 */ "func_name_list ::= func_name", - /* 212 */ "func_name_list ::= func_name_list NK_COMMA func_name", - /* 213 */ "func_name ::= function_name", - /* 214 */ "cmd ::= CREATE SMA INDEX not_exists_opt index_name ON table_name index_options", - /* 215 */ "cmd ::= CREATE FULLTEXT INDEX not_exists_opt index_name ON table_name NK_LP col_name_list NK_RP", - /* 216 */ "cmd ::= DROP INDEX exists_opt index_name ON table_name", - /* 217 */ "index_options ::=", - /* 218 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt", - /* 219 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt", - /* 220 */ "func_list ::= func", - /* 221 */ "func_list ::= func_list NK_COMMA func", - /* 222 */ "func ::= function_name NK_LP expression_list NK_RP", - /* 223 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS query_expression", - /* 224 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS db_name", - /* 225 */ "cmd ::= DROP TOPIC exists_opt topic_name", - /* 226 */ "topic_options ::=", - /* 227 */ "topic_options ::= topic_options WITH TABLE", - /* 228 */ "topic_options ::= topic_options WITH SCHEMA", - /* 229 */ "topic_options ::= topic_options WITH TAG", - /* 230 */ "cmd ::= DESC full_table_name", - /* 231 */ "cmd ::= DESCRIBE full_table_name", - /* 232 */ "cmd ::= RESET QUERY CACHE", - /* 233 */ "cmd ::= EXPLAIN analyze_opt 
- /* 234 */ "analyze_opt ::=",
- /* 235 */ "analyze_opt ::= ANALYZE",
- /* 236 */ "explain_options ::=",
- /* 237 */ "explain_options ::= explain_options VERBOSE NK_BOOL",
- /* 238 */ "explain_options ::= explain_options RATIO NK_FLOAT",
- /* 239 */ "cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP",
- /* 240 */ "cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt",
- /* 241 */ "cmd ::= DROP FUNCTION exists_opt function_name",
- /* 242 */ "agg_func_opt ::=",
- /* 243 */ "agg_func_opt ::= AGGREGATE",
- /* 244 */ "bufsize_opt ::=",
- /* 245 */ "bufsize_opt ::= BUFSIZE NK_INTEGER",
- /* 246 */ "cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression",
- /* 247 */ "cmd ::= DROP STREAM exists_opt stream_name",
- /* 248 */ "into_opt ::=",
- /* 249 */ "into_opt ::= INTO full_table_name",
- /* 250 */ "stream_options ::=",
- /* 251 */ "stream_options ::= stream_options TRIGGER AT_ONCE",
- /* 252 */ "stream_options ::= stream_options TRIGGER WINDOW_CLOSE",
- /* 253 */ "stream_options ::= stream_options WATERMARK duration_literal",
- /* 254 */ "cmd ::= KILL CONNECTION NK_INTEGER",
- /* 255 */ "cmd ::= KILL QUERY NK_INTEGER",
- /* 256 */ "cmd ::= KILL TRANSACTION NK_INTEGER",
- /* 257 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER",
- /* 258 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list",
- /* 259 */ "cmd ::= SPLIT VGROUP NK_INTEGER",
- /* 260 */ "dnode_list ::= DNODE NK_INTEGER",
- /* 261 */ "dnode_list ::= dnode_list DNODE NK_INTEGER",
- /* 262 */ "cmd ::= SYNCDB db_name REPLICA",
- /* 263 */ "cmd ::= query_expression",
- /* 264 */ "literal ::= NK_INTEGER",
- /* 265 */ "literal ::= NK_FLOAT",
- /* 266 */ "literal ::= NK_STRING",
- /* 267 */ "literal ::= NK_BOOL",
- /* 268 */ "literal ::= TIMESTAMP NK_STRING",
- /* 269 */ "literal ::= duration_literal",
- /* 270 */ "literal ::= NULL",
- /* 271 */ "literal ::= NK_QUESTION",
- /* 272 */ "duration_literal ::= NK_VARIABLE",
- /* 273 */ "signed ::= NK_INTEGER",
- /* 274 */ "signed ::= NK_PLUS NK_INTEGER",
- /* 275 */ "signed ::= NK_MINUS NK_INTEGER",
- /* 276 */ "signed ::= NK_FLOAT",
- /* 277 */ "signed ::= NK_PLUS NK_FLOAT",
- /* 278 */ "signed ::= NK_MINUS NK_FLOAT",
- /* 279 */ "signed_literal ::= signed",
- /* 280 */ "signed_literal ::= NK_STRING",
- /* 281 */ "signed_literal ::= NK_BOOL",
- /* 282 */ "signed_literal ::= TIMESTAMP NK_STRING",
- /* 283 */ "signed_literal ::= duration_literal",
- /* 284 */ "signed_literal ::= NULL",
- /* 285 */ "signed_literal ::= literal_func",
- /* 286 */ "literal_list ::= signed_literal",
- /* 287 */ "literal_list ::= literal_list NK_COMMA signed_literal",
- /* 288 */ "db_name ::= NK_ID",
- /* 289 */ "table_name ::= NK_ID",
- /* 290 */ "column_name ::= NK_ID",
- /* 291 */ "function_name ::= NK_ID",
- /* 292 */ "table_alias ::= NK_ID",
- /* 293 */ "column_alias ::= NK_ID",
- /* 294 */ "user_name ::= NK_ID",
- /* 295 */ "index_name ::= NK_ID",
- /* 296 */ "topic_name ::= NK_ID",
- /* 297 */ "stream_name ::= NK_ID",
- /* 298 */ "expression ::= literal",
- /* 299 */ "expression ::= pseudo_column",
- /* 300 */ "expression ::= column_reference",
- /* 301 */ "expression ::= function_expression",
- /* 302 */ "expression ::= subquery",
- /* 303 */ "expression ::= NK_LP expression NK_RP",
- /* 304 */ "expression ::= NK_PLUS expression",
- /* 305 */ "expression ::= NK_MINUS expression",
- /* 306 */ "expression ::= expression NK_PLUS expression",
- /* 307 */ "expression ::= expression NK_MINUS expression",
- /* 308 */ "expression ::= expression NK_STAR expression",
- /* 309 */ "expression ::= expression NK_SLASH expression",
- /* 310 */ "expression ::= expression NK_REM expression",
- /* 311 */ "expression ::= column_reference NK_ARROW NK_STRING",
- /* 312 */ "expression_list ::= expression",
- /* 313 */ "expression_list ::= expression_list NK_COMMA expression",
- /* 314 */ "column_reference ::= column_name",
- /* 315 */ "column_reference ::= table_name NK_DOT column_name",
- /* 316 */ "pseudo_column ::= ROWTS",
- /* 317 */ "pseudo_column ::= TBNAME",
- /* 318 */ "pseudo_column ::= table_name NK_DOT TBNAME",
- /* 319 */ "pseudo_column ::= QSTARTTS",
- /* 320 */ "pseudo_column ::= QENDTS",
- /* 321 */ "pseudo_column ::= WSTARTTS",
- /* 322 */ "pseudo_column ::= WENDTS",
- /* 323 */ "pseudo_column ::= WDURATION",
- /* 324 */ "function_expression ::= function_name NK_LP expression_list NK_RP",
- /* 325 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP",
- /* 326 */ "function_expression ::= CAST NK_LP expression AS type_name NK_RP",
- /* 327 */ "function_expression ::= literal_func",
- /* 328 */ "literal_func ::= noarg_func NK_LP NK_RP",
- /* 329 */ "literal_func ::= NOW",
- /* 330 */ "noarg_func ::= NOW",
- /* 331 */ "noarg_func ::= TODAY",
- /* 332 */ "noarg_func ::= TIMEZONE",
- /* 333 */ "star_func ::= COUNT",
- /* 334 */ "star_func ::= FIRST",
- /* 335 */ "star_func ::= LAST",
- /* 336 */ "star_func ::= LAST_ROW",
- /* 337 */ "star_func_para_list ::= NK_STAR",
- /* 338 */ "star_func_para_list ::= other_para_list",
- /* 339 */ "other_para_list ::= star_func_para",
- /* 340 */ "other_para_list ::= other_para_list NK_COMMA star_func_para",
- /* 341 */ "star_func_para ::= expression",
- /* 342 */ "star_func_para ::= table_name NK_DOT NK_STAR",
- /* 343 */ "predicate ::= expression compare_op expression",
- /* 344 */ "predicate ::= expression BETWEEN expression AND expression",
- /* 345 */ "predicate ::= expression NOT BETWEEN expression AND expression",
- /* 346 */ "predicate ::= expression IS NULL",
- /* 347 */ "predicate ::= expression IS NOT NULL",
- /* 348 */ "predicate ::= expression in_op in_predicate_value",
- /* 349 */ "compare_op ::= NK_LT",
- /* 350 */ "compare_op ::= NK_GT",
- /* 351 */ "compare_op ::= NK_LE",
- /* 352 */ "compare_op ::= NK_GE",
- /* 353 */ "compare_op ::= NK_NE",
- /* 354 */ "compare_op ::= NK_EQ",
- /* 355 */ "compare_op ::= LIKE",
- /* 356 */ "compare_op ::= NOT LIKE",
- /* 357 */ "compare_op ::= MATCH",
- /* 358 */ "compare_op ::= NMATCH",
- /* 359 */ "compare_op ::= CONTAINS",
- /* 360 */ "in_op ::= IN",
- /* 361 */ "in_op ::= NOT IN",
- /* 362 */ "in_predicate_value ::= NK_LP expression_list NK_RP",
- /* 363 */ "boolean_value_expression ::= boolean_primary",
- /* 364 */ "boolean_value_expression ::= NOT boolean_primary",
- /* 365 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression",
- /* 366 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression",
- /* 367 */ "boolean_primary ::= predicate",
- /* 368 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP",
- /* 369 */ "common_expression ::= expression",
- /* 370 */ "common_expression ::= boolean_value_expression",
- /* 371 */ "from_clause ::= FROM table_reference_list",
- /* 372 */ "table_reference_list ::= table_reference",
- /* 373 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference",
- /* 374 */ "table_reference ::= table_primary",
- /* 375 */ "table_reference ::= joined_table",
- /* 376 */ "table_primary ::= table_name alias_opt",
- /* 377 */ "table_primary ::= db_name NK_DOT table_name alias_opt",
- /* 378 */ "table_primary ::= subquery alias_opt",
- /* 379 */ "table_primary ::= parenthesized_joined_table",
- /* 380 */ "alias_opt ::=",
- /* 381 */ "alias_opt ::= table_alias",
- /* 382 */ "alias_opt ::= AS table_alias",
- /* 383 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP",
- /* 384 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP",
- /* 385 */ "joined_table ::= table_reference join_type JOIN table_reference ON search_condition",
- /* 386 */ "join_type ::=",
- /* 387 */ "join_type ::= INNER",
- /* 388 */ "query_specification ::= SELECT set_quantifier_opt select_list from_clause where_clause_opt partition_by_clause_opt twindow_clause_opt group_by_clause_opt having_clause_opt",
- /* 389 */ "set_quantifier_opt ::=",
- /* 390 */ "set_quantifier_opt ::= DISTINCT",
- /* 391 */ "set_quantifier_opt ::= ALL",
- /* 392 */ "select_list ::= NK_STAR",
- /* 393 */ "select_list ::= select_sublist",
- /* 394 */ "select_sublist ::= select_item",
- /* 395 */ "select_sublist ::= select_sublist NK_COMMA select_item",
- /* 396 */ "select_item ::= common_expression",
- /* 397 */ "select_item ::= common_expression column_alias",
- /* 398 */ "select_item ::= common_expression AS column_alias",
- /* 399 */ "select_item ::= table_name NK_DOT NK_STAR",
- /* 400 */ "where_clause_opt ::=",
- /* 401 */ "where_clause_opt ::= WHERE search_condition",
- /* 402 */ "partition_by_clause_opt ::=",
- /* 403 */ "partition_by_clause_opt ::= PARTITION BY expression_list",
- /* 404 */ "twindow_clause_opt ::=",
- /* 405 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP",
- /* 406 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP",
- /* 407 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt",
- /* 408 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt",
- /* 409 */ "sliding_opt ::=",
- /* 410 */ "sliding_opt ::= SLIDING NK_LP duration_literal NK_RP",
- /* 411 */ "fill_opt ::=",
- /* 412 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP",
- /* 413 */ "fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP",
- /* 414 */ "fill_mode ::= NONE",
- /* 415 */ "fill_mode ::= PREV",
- /* 416 */ "fill_mode ::= NULL",
- /* 417 */ "fill_mode ::= LINEAR",
- /* 418 */ "fill_mode ::= NEXT",
- /* 419 */ "group_by_clause_opt ::=",
- /* 420 */ "group_by_clause_opt ::= GROUP BY group_by_list",
- /* 421 */ "group_by_list ::= expression",
- /* 422 */ "group_by_list ::= group_by_list NK_COMMA expression",
- /* 423 */ "having_clause_opt ::=",
- /* 424 */ "having_clause_opt ::= HAVING search_condition",
- /* 425 */ "query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt",
- /* 426 */ "query_expression_body ::= query_primary",
- /* 427 */ "query_expression_body ::= query_expression_body UNION ALL query_expression_body",
- /* 428 */ "query_expression_body ::= query_expression_body UNION query_expression_body",
- /* 429 */ "query_primary ::= query_specification",
- /* 430 */ "query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP",
- /* 431 */ "order_by_clause_opt ::=",
- /* 432 */ "order_by_clause_opt ::= ORDER BY sort_specification_list",
- /* 433 */ "slimit_clause_opt ::=",
- /* 434 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER",
- /* 435 */
"slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER", - /* 436 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER", - /* 437 */ "limit_clause_opt ::=", - /* 438 */ "limit_clause_opt ::= LIMIT NK_INTEGER", - /* 439 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER", - /* 440 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER", - /* 441 */ "subquery ::= NK_LP query_expression NK_RP", - /* 442 */ "search_condition ::= common_expression", - /* 443 */ "sort_specification_list ::= sort_specification", - /* 444 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification", - /* 445 */ "sort_specification ::= expression ordering_specification_opt null_ordering_opt", - /* 446 */ "ordering_specification_opt ::=", - /* 447 */ "ordering_specification_opt ::= ASC", - /* 448 */ "ordering_specification_opt ::= DESC", - /* 449 */ "null_ordering_opt ::=", - /* 450 */ "null_ordering_opt ::= NULLS FIRST", - /* 451 */ "null_ordering_opt ::= NULLS LAST", + /* 87 */ "db_options ::= db_options SCHEMALESS NK_INTEGER", + /* 88 */ "alter_db_options ::= alter_db_option", + /* 89 */ "alter_db_options ::= alter_db_options alter_db_option", + /* 90 */ "alter_db_option ::= BUFFER NK_INTEGER", + /* 91 */ "alter_db_option ::= CACHELAST NK_INTEGER", + /* 92 */ "alter_db_option ::= FSYNC NK_INTEGER", + /* 93 */ "alter_db_option ::= KEEP integer_list", + /* 94 */ "alter_db_option ::= KEEP variable_list", + /* 95 */ "alter_db_option ::= PAGES NK_INTEGER", + /* 96 */ "alter_db_option ::= REPLICA NK_INTEGER", + /* 97 */ "alter_db_option ::= STRICT NK_INTEGER", + /* 98 */ "alter_db_option ::= WAL NK_INTEGER", + /* 99 */ "integer_list ::= NK_INTEGER", + /* 100 */ "integer_list ::= integer_list NK_COMMA NK_INTEGER", + /* 101 */ "variable_list ::= NK_VARIABLE", + /* 102 */ "variable_list ::= variable_list NK_COMMA NK_VARIABLE", + /* 103 */ "retention_list ::= retention", + /* 104 */ "retention_list ::= retention_list NK_COMMA retention", + /* 105 */ "retention ::= NK_VARIABLE NK_COLON NK_VARIABLE", + /* 106 */ "cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options", + /* 107 */ "cmd ::= CREATE TABLE multi_create_clause", + /* 108 */ "cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options", + /* 109 */ "cmd ::= DROP TABLE multi_drop_clause", + /* 110 */ "cmd ::= DROP STABLE exists_opt full_table_name", + /* 111 */ "cmd ::= ALTER TABLE alter_table_clause", + /* 112 */ "cmd ::= ALTER STABLE alter_table_clause", + /* 113 */ "alter_table_clause ::= full_table_name alter_table_options", + /* 114 */ "alter_table_clause ::= full_table_name ADD COLUMN column_name type_name", + /* 115 */ "alter_table_clause ::= full_table_name DROP COLUMN column_name", + /* 116 */ "alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name", + /* 117 */ "alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name", + /* 118 */ "alter_table_clause ::= full_table_name ADD TAG column_name type_name", + /* 119 */ "alter_table_clause ::= full_table_name DROP TAG column_name", + /* 120 */ "alter_table_clause ::= full_table_name MODIFY TAG column_name type_name", + /* 121 */ "alter_table_clause ::= full_table_name RENAME TAG column_name column_name", + /* 122 */ "alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal", + /* 123 */ "multi_create_clause ::= create_subtable_clause", + /* 124 */ "multi_create_clause ::= multi_create_clause 
create_subtable_clause", + /* 125 */ "create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_tags_opt TAGS NK_LP literal_list NK_RP table_options", + /* 126 */ "multi_drop_clause ::= drop_table_clause", + /* 127 */ "multi_drop_clause ::= multi_drop_clause drop_table_clause", + /* 128 */ "drop_table_clause ::= exists_opt full_table_name", + /* 129 */ "specific_tags_opt ::=", + /* 130 */ "specific_tags_opt ::= NK_LP col_name_list NK_RP", + /* 131 */ "full_table_name ::= table_name", + /* 132 */ "full_table_name ::= db_name NK_DOT table_name", + /* 133 */ "column_def_list ::= column_def", + /* 134 */ "column_def_list ::= column_def_list NK_COMMA column_def", + /* 135 */ "column_def ::= column_name type_name", + /* 136 */ "column_def ::= column_name type_name COMMENT NK_STRING", + /* 137 */ "type_name ::= BOOL", + /* 138 */ "type_name ::= TINYINT", + /* 139 */ "type_name ::= SMALLINT", + /* 140 */ "type_name ::= INT", + /* 141 */ "type_name ::= INTEGER", + /* 142 */ "type_name ::= BIGINT", + /* 143 */ "type_name ::= FLOAT", + /* 144 */ "type_name ::= DOUBLE", + /* 145 */ "type_name ::= BINARY NK_LP NK_INTEGER NK_RP", + /* 146 */ "type_name ::= TIMESTAMP", + /* 147 */ "type_name ::= NCHAR NK_LP NK_INTEGER NK_RP", + /* 148 */ "type_name ::= TINYINT UNSIGNED", + /* 149 */ "type_name ::= SMALLINT UNSIGNED", + /* 150 */ "type_name ::= INT UNSIGNED", + /* 151 */ "type_name ::= BIGINT UNSIGNED", + /* 152 */ "type_name ::= JSON", + /* 153 */ "type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP", + /* 154 */ "type_name ::= MEDIUMBLOB", + /* 155 */ "type_name ::= BLOB", + /* 156 */ "type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP", + /* 157 */ "type_name ::= DECIMAL", + /* 158 */ "type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP", + /* 159 */ "type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP", + /* 160 */ "tags_def_opt ::=", + /* 161 */ "tags_def_opt ::= tags_def", + /* 162 */ "tags_def ::= TAGS NK_LP column_def_list NK_RP", + /* 163 */ "table_options ::=", + /* 164 */ "table_options ::= table_options COMMENT NK_STRING", + /* 165 */ "table_options ::= table_options DELAY NK_INTEGER", + /* 166 */ "table_options ::= table_options FILE_FACTOR NK_FLOAT", + /* 167 */ "table_options ::= table_options ROLLUP NK_LP func_name_list NK_RP", + /* 168 */ "table_options ::= table_options TTL NK_INTEGER", + /* 169 */ "table_options ::= table_options SMA NK_LP col_name_list NK_RP", + /* 170 */ "alter_table_options ::= alter_table_option", + /* 171 */ "alter_table_options ::= alter_table_options alter_table_option", + /* 172 */ "alter_table_option ::= COMMENT NK_STRING", + /* 173 */ "alter_table_option ::= TTL NK_INTEGER", + /* 174 */ "col_name_list ::= col_name", + /* 175 */ "col_name_list ::= col_name_list NK_COMMA col_name", + /* 176 */ "col_name ::= column_name", + /* 177 */ "cmd ::= SHOW DNODES", + /* 178 */ "cmd ::= SHOW USERS", + /* 179 */ "cmd ::= SHOW DATABASES", + /* 180 */ "cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt", + /* 181 */ "cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt", + /* 182 */ "cmd ::= SHOW db_name_cond_opt VGROUPS", + /* 183 */ "cmd ::= SHOW MNODES", + /* 184 */ "cmd ::= SHOW MODULES", + /* 185 */ "cmd ::= SHOW QNODES", + /* 186 */ "cmd ::= SHOW FUNCTIONS", + /* 187 */ "cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt", + /* 188 */ "cmd ::= SHOW STREAMS", + /* 189 */ "cmd ::= SHOW ACCOUNTS", + /* 190 */ "cmd ::= SHOW APPS", + /* 191 */ "cmd ::= SHOW CONNECTIONS", + /* 192 */ "cmd ::= SHOW LICENCE", + /* 193 */ "cmd ::= SHOW GRANTS", 
+ /* 194 */ "cmd ::= SHOW CREATE DATABASE db_name", + /* 195 */ "cmd ::= SHOW CREATE TABLE full_table_name", + /* 196 */ "cmd ::= SHOW CREATE STABLE full_table_name", + /* 197 */ "cmd ::= SHOW QUERIES", + /* 198 */ "cmd ::= SHOW SCORES", + /* 199 */ "cmd ::= SHOW TOPICS", + /* 200 */ "cmd ::= SHOW VARIABLES", + /* 201 */ "cmd ::= SHOW BNODES", + /* 202 */ "cmd ::= SHOW SNODES", + /* 203 */ "cmd ::= SHOW CLUSTER", + /* 204 */ "cmd ::= SHOW TRANSACTIONS", + /* 205 */ "db_name_cond_opt ::=", + /* 206 */ "db_name_cond_opt ::= db_name NK_DOT", + /* 207 */ "like_pattern_opt ::=", + /* 208 */ "like_pattern_opt ::= LIKE NK_STRING", + /* 209 */ "table_name_cond ::= table_name", + /* 210 */ "from_db_opt ::=", + /* 211 */ "from_db_opt ::= FROM db_name", + /* 212 */ "func_name_list ::= func_name", + /* 213 */ "func_name_list ::= func_name_list NK_COMMA func_name", + /* 214 */ "func_name ::= function_name", + /* 215 */ "cmd ::= CREATE SMA INDEX not_exists_opt index_name ON table_name index_options", + /* 216 */ "cmd ::= CREATE FULLTEXT INDEX not_exists_opt index_name ON table_name NK_LP col_name_list NK_RP", + /* 217 */ "cmd ::= DROP INDEX exists_opt index_name ON table_name", + /* 218 */ "index_options ::=", + /* 219 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt", + /* 220 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt", + /* 221 */ "func_list ::= func", + /* 222 */ "func_list ::= func_list NK_COMMA func", + /* 223 */ "func ::= function_name NK_LP expression_list NK_RP", + /* 224 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS query_expression", + /* 225 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS db_name", + /* 226 */ "cmd ::= DROP TOPIC exists_opt topic_name", + /* 227 */ "cmd ::= DROP CGROUP exists_opt cgroup_name ON topic_name", + /* 228 */ "topic_options ::=", + /* 229 */ "topic_options ::= topic_options WITH TABLE", + /* 230 */ "topic_options ::= topic_options WITH SCHEMA", + /* 231 */ "topic_options ::= topic_options WITH TAG", + /* 232 */ "cmd ::= DESC full_table_name", + /* 233 */ "cmd ::= DESCRIBE full_table_name", + /* 234 */ "cmd ::= RESET QUERY CACHE", + /* 235 */ "cmd ::= EXPLAIN analyze_opt explain_options query_expression", + /* 236 */ "analyze_opt ::=", + /* 237 */ "analyze_opt ::= ANALYZE", + /* 238 */ "explain_options ::=", + /* 239 */ "explain_options ::= explain_options VERBOSE NK_BOOL", + /* 240 */ "explain_options ::= explain_options RATIO NK_FLOAT", + /* 241 */ "cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP", + /* 242 */ "cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt", + /* 243 */ "cmd ::= DROP FUNCTION exists_opt function_name", + /* 244 */ "agg_func_opt ::=", + /* 245 */ "agg_func_opt ::= AGGREGATE", + /* 246 */ "bufsize_opt ::=", + /* 247 */ "bufsize_opt ::= BUFSIZE NK_INTEGER", + /* 248 */ "cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression", + /* 249 */ "cmd ::= DROP STREAM exists_opt stream_name", + /* 250 */ "into_opt ::=", + /* 251 */ "into_opt ::= INTO full_table_name", + /* 252 */ "stream_options ::=", + /* 253 */ "stream_options ::= stream_options TRIGGER AT_ONCE", + /* 254 */ "stream_options ::= stream_options TRIGGER WINDOW_CLOSE", + /* 255 */ "stream_options ::= stream_options WATERMARK duration_literal", + /* 256 */ "cmd ::= KILL CONNECTION NK_INTEGER", + /* 257 */ "cmd 
::= KILL QUERY NK_INTEGER", + /* 258 */ "cmd ::= KILL TRANSACTION NK_INTEGER", + /* 259 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER", + /* 260 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list", + /* 261 */ "cmd ::= SPLIT VGROUP NK_INTEGER", + /* 262 */ "dnode_list ::= DNODE NK_INTEGER", + /* 263 */ "dnode_list ::= dnode_list DNODE NK_INTEGER", + /* 264 */ "cmd ::= SYNCDB db_name REPLICA", + /* 265 */ "cmd ::= query_expression", + /* 266 */ "literal ::= NK_INTEGER", + /* 267 */ "literal ::= NK_FLOAT", + /* 268 */ "literal ::= NK_STRING", + /* 269 */ "literal ::= NK_BOOL", + /* 270 */ "literal ::= TIMESTAMP NK_STRING", + /* 271 */ "literal ::= duration_literal", + /* 272 */ "literal ::= NULL", + /* 273 */ "literal ::= NK_QUESTION", + /* 274 */ "duration_literal ::= NK_VARIABLE", + /* 275 */ "signed ::= NK_INTEGER", + /* 276 */ "signed ::= NK_PLUS NK_INTEGER", + /* 277 */ "signed ::= NK_MINUS NK_INTEGER", + /* 278 */ "signed ::= NK_FLOAT", + /* 279 */ "signed ::= NK_PLUS NK_FLOAT", + /* 280 */ "signed ::= NK_MINUS NK_FLOAT", + /* 281 */ "signed_literal ::= signed", + /* 282 */ "signed_literal ::= NK_STRING", + /* 283 */ "signed_literal ::= NK_BOOL", + /* 284 */ "signed_literal ::= TIMESTAMP NK_STRING", + /* 285 */ "signed_literal ::= duration_literal", + /* 286 */ "signed_literal ::= NULL", + /* 287 */ "signed_literal ::= literal_func", + /* 288 */ "literal_list ::= signed_literal", + /* 289 */ "literal_list ::= literal_list NK_COMMA signed_literal", + /* 290 */ "db_name ::= NK_ID", + /* 291 */ "table_name ::= NK_ID", + /* 292 */ "column_name ::= NK_ID", + /* 293 */ "function_name ::= NK_ID", + /* 294 */ "table_alias ::= NK_ID", + /* 295 */ "column_alias ::= NK_ID", + /* 296 */ "user_name ::= NK_ID", + /* 297 */ "index_name ::= NK_ID", + /* 298 */ "topic_name ::= NK_ID", + /* 299 */ "stream_name ::= NK_ID", + /* 300 */ "cgroup_name ::= NK_ID", + /* 301 */ "expression ::= literal", + /* 302 */ "expression ::= pseudo_column", + /* 303 */ "expression ::= column_reference", + /* 304 */ "expression ::= function_expression", + /* 305 */ "expression ::= subquery", + /* 306 */ "expression ::= NK_LP expression NK_RP", + /* 307 */ "expression ::= NK_PLUS expression", + /* 308 */ "expression ::= NK_MINUS expression", + /* 309 */ "expression ::= expression NK_PLUS expression", + /* 310 */ "expression ::= expression NK_MINUS expression", + /* 311 */ "expression ::= expression NK_STAR expression", + /* 312 */ "expression ::= expression NK_SLASH expression", + /* 313 */ "expression ::= expression NK_REM expression", + /* 314 */ "expression ::= column_reference NK_ARROW NK_STRING", + /* 315 */ "expression_list ::= expression", + /* 316 */ "expression_list ::= expression_list NK_COMMA expression", + /* 317 */ "column_reference ::= column_name", + /* 318 */ "column_reference ::= table_name NK_DOT column_name", + /* 319 */ "pseudo_column ::= ROWTS", + /* 320 */ "pseudo_column ::= TBNAME", + /* 321 */ "pseudo_column ::= table_name NK_DOT TBNAME", + /* 322 */ "pseudo_column ::= QSTARTTS", + /* 323 */ "pseudo_column ::= QENDTS", + /* 324 */ "pseudo_column ::= WSTARTTS", + /* 325 */ "pseudo_column ::= WENDTS", + /* 326 */ "pseudo_column ::= WDURATION", + /* 327 */ "function_expression ::= function_name NK_LP expression_list NK_RP", + /* 328 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP", + /* 329 */ "function_expression ::= CAST NK_LP expression AS type_name NK_RP", + /* 330 */ "function_expression ::= literal_func", + /* 331 */ "literal_func ::= noarg_func NK_LP NK_RP", + /* 332 */ 
"literal_func ::= NOW", + /* 333 */ "noarg_func ::= NOW", + /* 334 */ "noarg_func ::= TODAY", + /* 335 */ "noarg_func ::= TIMEZONE", + /* 336 */ "star_func ::= COUNT", + /* 337 */ "star_func ::= FIRST", + /* 338 */ "star_func ::= LAST", + /* 339 */ "star_func ::= LAST_ROW", + /* 340 */ "star_func_para_list ::= NK_STAR", + /* 341 */ "star_func_para_list ::= other_para_list", + /* 342 */ "other_para_list ::= star_func_para", + /* 343 */ "other_para_list ::= other_para_list NK_COMMA star_func_para", + /* 344 */ "star_func_para ::= expression", + /* 345 */ "star_func_para ::= table_name NK_DOT NK_STAR", + /* 346 */ "predicate ::= expression compare_op expression", + /* 347 */ "predicate ::= expression BETWEEN expression AND expression", + /* 348 */ "predicate ::= expression NOT BETWEEN expression AND expression", + /* 349 */ "predicate ::= expression IS NULL", + /* 350 */ "predicate ::= expression IS NOT NULL", + /* 351 */ "predicate ::= expression in_op in_predicate_value", + /* 352 */ "compare_op ::= NK_LT", + /* 353 */ "compare_op ::= NK_GT", + /* 354 */ "compare_op ::= NK_LE", + /* 355 */ "compare_op ::= NK_GE", + /* 356 */ "compare_op ::= NK_NE", + /* 357 */ "compare_op ::= NK_EQ", + /* 358 */ "compare_op ::= LIKE", + /* 359 */ "compare_op ::= NOT LIKE", + /* 360 */ "compare_op ::= MATCH", + /* 361 */ "compare_op ::= NMATCH", + /* 362 */ "compare_op ::= CONTAINS", + /* 363 */ "in_op ::= IN", + /* 364 */ "in_op ::= NOT IN", + /* 365 */ "in_predicate_value ::= NK_LP expression_list NK_RP", + /* 366 */ "boolean_value_expression ::= boolean_primary", + /* 367 */ "boolean_value_expression ::= NOT boolean_primary", + /* 368 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression", + /* 369 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression", + /* 370 */ "boolean_primary ::= predicate", + /* 371 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP", + /* 372 */ "common_expression ::= expression", + /* 373 */ "common_expression ::= boolean_value_expression", + /* 374 */ "from_clause ::= FROM table_reference_list", + /* 375 */ "table_reference_list ::= table_reference", + /* 376 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference", + /* 377 */ "table_reference ::= table_primary", + /* 378 */ "table_reference ::= joined_table", + /* 379 */ "table_primary ::= table_name alias_opt", + /* 380 */ "table_primary ::= db_name NK_DOT table_name alias_opt", + /* 381 */ "table_primary ::= subquery alias_opt", + /* 382 */ "table_primary ::= parenthesized_joined_table", + /* 383 */ "alias_opt ::=", + /* 384 */ "alias_opt ::= table_alias", + /* 385 */ "alias_opt ::= AS table_alias", + /* 386 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP", + /* 387 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP", + /* 388 */ "joined_table ::= table_reference join_type JOIN table_reference ON search_condition", + /* 389 */ "join_type ::=", + /* 390 */ "join_type ::= INNER", + /* 391 */ "query_specification ::= SELECT set_quantifier_opt select_list from_clause where_clause_opt partition_by_clause_opt twindow_clause_opt group_by_clause_opt having_clause_opt", + /* 392 */ "set_quantifier_opt ::=", + /* 393 */ "set_quantifier_opt ::= DISTINCT", + /* 394 */ "set_quantifier_opt ::= ALL", + /* 395 */ "select_list ::= NK_STAR", + /* 396 */ "select_list ::= select_sublist", + /* 397 */ "select_sublist ::= select_item", + /* 398 */ "select_sublist ::= select_sublist NK_COMMA select_item", + /* 399 */ 
"select_item ::= common_expression", + /* 400 */ "select_item ::= common_expression column_alias", + /* 401 */ "select_item ::= common_expression AS column_alias", + /* 402 */ "select_item ::= table_name NK_DOT NK_STAR", + /* 403 */ "where_clause_opt ::=", + /* 404 */ "where_clause_opt ::= WHERE search_condition", + /* 405 */ "partition_by_clause_opt ::=", + /* 406 */ "partition_by_clause_opt ::= PARTITION BY expression_list", + /* 407 */ "twindow_clause_opt ::=", + /* 408 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP", + /* 409 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP", + /* 410 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt", + /* 411 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt", + /* 412 */ "sliding_opt ::=", + /* 413 */ "sliding_opt ::= SLIDING NK_LP duration_literal NK_RP", + /* 414 */ "fill_opt ::=", + /* 415 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP", + /* 416 */ "fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP", + /* 417 */ "fill_mode ::= NONE", + /* 418 */ "fill_mode ::= PREV", + /* 419 */ "fill_mode ::= NULL", + /* 420 */ "fill_mode ::= LINEAR", + /* 421 */ "fill_mode ::= NEXT", + /* 422 */ "group_by_clause_opt ::=", + /* 423 */ "group_by_clause_opt ::= GROUP BY group_by_list", + /* 424 */ "group_by_list ::= expression", + /* 425 */ "group_by_list ::= group_by_list NK_COMMA expression", + /* 426 */ "having_clause_opt ::=", + /* 427 */ "having_clause_opt ::= HAVING search_condition", + /* 428 */ "query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt", + /* 429 */ "query_expression_body ::= query_primary", + /* 430 */ "query_expression_body ::= query_expression_body UNION ALL query_expression_body", + /* 431 */ "query_expression_body ::= query_expression_body UNION query_expression_body", + /* 432 */ "query_primary ::= query_specification", + /* 433 */ "query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP", + /* 434 */ "order_by_clause_opt ::=", + /* 435 */ "order_by_clause_opt ::= ORDER BY sort_specification_list", + /* 436 */ "slimit_clause_opt ::=", + /* 437 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER", + /* 438 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER", + /* 439 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER", + /* 440 */ "limit_clause_opt ::=", + /* 441 */ "limit_clause_opt ::= LIMIT NK_INTEGER", + /* 442 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER", + /* 443 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER", + /* 444 */ "subquery ::= NK_LP query_expression NK_RP", + /* 445 */ "search_condition ::= common_expression", + /* 446 */ "sort_specification_list ::= sort_specification", + /* 447 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification", + /* 448 */ "sort_specification ::= expression ordering_specification_opt null_ordering_opt", + /* 449 */ "ordering_specification_opt ::=", + /* 450 */ "ordering_specification_opt ::= ASC", + /* 451 */ "ordering_specification_opt ::= DESC", + /* 452 */ "null_ordering_opt ::=", + /* 453 */ "null_ordering_opt ::= NULLS FIRST", + /* 454 */ "null_ordering_opt ::= NULLS LAST", }; #endif /* NDEBUG */ @@ -2093,174 +2104,175 @@ static void yy_destructor( */ /********* Begin destructor definitions ***************************************/ /* Default NON-TERMINAL Destructor 
*/ - case 238: /* cmd */ - case 241: /* literal */ - case 252: /* db_options */ - case 254: /* alter_db_options */ - case 259: /* retention */ - case 260: /* full_table_name */ - case 263: /* table_options */ - case 267: /* alter_table_clause */ - case 268: /* alter_table_options */ - case 271: /* signed_literal */ - case 272: /* create_subtable_clause */ - case 275: /* drop_table_clause */ - case 278: /* column_def */ - case 281: /* col_name */ - case 282: /* db_name_cond_opt */ - case 283: /* like_pattern_opt */ - case 284: /* table_name_cond */ - case 285: /* from_db_opt */ - case 286: /* func_name */ - case 289: /* index_options */ - case 291: /* duration_literal */ - case 292: /* sliding_opt */ - case 293: /* func */ - case 296: /* topic_options */ - case 297: /* query_expression */ - case 299: /* explain_options */ - case 303: /* stream_options */ - case 304: /* into_opt */ - case 306: /* signed */ - case 307: /* literal_func */ - case 310: /* expression */ - case 311: /* pseudo_column */ - case 312: /* column_reference */ - case 313: /* function_expression */ - case 314: /* subquery */ - case 319: /* star_func_para */ - case 320: /* predicate */ - case 323: /* in_predicate_value */ - case 324: /* boolean_value_expression */ - case 325: /* boolean_primary */ - case 326: /* common_expression */ - case 327: /* from_clause */ - case 328: /* table_reference_list */ - case 329: /* table_reference */ - case 330: /* table_primary */ - case 331: /* joined_table */ - case 333: /* parenthesized_joined_table */ - case 335: /* search_condition */ - case 336: /* query_specification */ - case 339: /* where_clause_opt */ - case 341: /* twindow_clause_opt */ - case 343: /* having_clause_opt */ - case 345: /* select_item */ - case 346: /* fill_opt */ - case 349: /* query_expression_body */ - case 351: /* slimit_clause_opt */ - case 352: /* limit_clause_opt */ - case 353: /* query_primary */ - case 355: /* sort_specification */ + case 240: /* cmd */ + case 243: /* literal */ + case 254: /* db_options */ + case 256: /* alter_db_options */ + case 261: /* retention */ + case 262: /* full_table_name */ + case 265: /* table_options */ + case 269: /* alter_table_clause */ + case 270: /* alter_table_options */ + case 273: /* signed_literal */ + case 274: /* create_subtable_clause */ + case 277: /* drop_table_clause */ + case 280: /* column_def */ + case 283: /* col_name */ + case 284: /* db_name_cond_opt */ + case 285: /* like_pattern_opt */ + case 286: /* table_name_cond */ + case 287: /* from_db_opt */ + case 288: /* func_name */ + case 291: /* index_options */ + case 293: /* duration_literal */ + case 294: /* sliding_opt */ + case 295: /* func */ + case 298: /* topic_options */ + case 299: /* query_expression */ + case 302: /* explain_options */ + case 306: /* stream_options */ + case 307: /* into_opt */ + case 309: /* signed */ + case 310: /* literal_func */ + case 313: /* expression */ + case 314: /* pseudo_column */ + case 315: /* column_reference */ + case 316: /* function_expression */ + case 317: /* subquery */ + case 322: /* star_func_para */ + case 323: /* predicate */ + case 326: /* in_predicate_value */ + case 327: /* boolean_value_expression */ + case 328: /* boolean_primary */ + case 329: /* common_expression */ + case 330: /* from_clause */ + case 331: /* table_reference_list */ + case 332: /* table_reference */ + case 333: /* table_primary */ + case 334: /* joined_table */ + case 336: /* parenthesized_joined_table */ + case 338: /* search_condition */ + case 339: /* query_specification */ + 
case 342: /* where_clause_opt */ + case 344: /* twindow_clause_opt */ + case 346: /* having_clause_opt */ + case 348: /* select_item */ + case 349: /* fill_opt */ + case 352: /* query_expression_body */ + case 354: /* slimit_clause_opt */ + case 355: /* limit_clause_opt */ + case 356: /* query_primary */ + case 358: /* sort_specification */ { - nodesDestroyNode((yypminor->yy172)); + nodesDestroyNode((yypminor->yy636)); } break; - case 239: /* account_options */ - case 240: /* alter_account_options */ - case 242: /* alter_account_option */ - case 301: /* bufsize_opt */ + case 241: /* account_options */ + case 242: /* alter_account_options */ + case 244: /* alter_account_option */ + case 304: /* bufsize_opt */ { } break; - case 243: /* user_name */ - case 245: /* priv_level */ - case 248: /* db_name */ - case 249: /* dnode_endpoint */ - case 250: /* dnode_host_name */ - case 269: /* column_name */ - case 277: /* table_name */ - case 287: /* function_name */ - case 288: /* index_name */ - case 295: /* topic_name */ - case 302: /* stream_name */ - case 308: /* table_alias */ - case 309: /* column_alias */ - case 315: /* star_func */ - case 317: /* noarg_func */ - case 332: /* alias_opt */ + case 245: /* user_name */ + case 247: /* priv_level */ + case 250: /* db_name */ + case 251: /* dnode_endpoint */ + case 252: /* dnode_host_name */ + case 271: /* column_name */ + case 279: /* table_name */ + case 289: /* function_name */ + case 290: /* index_name */ + case 297: /* topic_name */ + case 300: /* cgroup_name */ + case 305: /* stream_name */ + case 311: /* table_alias */ + case 312: /* column_alias */ + case 318: /* star_func */ + case 320: /* noarg_func */ + case 335: /* alias_opt */ { } break; - case 244: /* privileges */ - case 246: /* priv_type_list */ - case 247: /* priv_type */ + case 246: /* privileges */ + case 248: /* priv_type_list */ + case 249: /* priv_type */ { } break; - case 251: /* not_exists_opt */ - case 253: /* exists_opt */ - case 298: /* analyze_opt */ - case 300: /* agg_func_opt */ - case 337: /* set_quantifier_opt */ + case 253: /* not_exists_opt */ + case 255: /* exists_opt */ + case 301: /* analyze_opt */ + case 303: /* agg_func_opt */ + case 340: /* set_quantifier_opt */ { } break; - case 255: /* integer_list */ - case 256: /* variable_list */ - case 257: /* retention_list */ - case 261: /* column_def_list */ - case 262: /* tags_def_opt */ - case 264: /* multi_create_clause */ - case 265: /* tags_def */ - case 266: /* multi_drop_clause */ - case 273: /* specific_tags_opt */ - case 274: /* literal_list */ - case 276: /* col_name_list */ - case 279: /* func_name_list */ - case 290: /* func_list */ - case 294: /* expression_list */ - case 305: /* dnode_list */ - case 316: /* star_func_para_list */ - case 318: /* other_para_list */ - case 338: /* select_list */ - case 340: /* partition_by_clause_opt */ - case 342: /* group_by_clause_opt */ - case 344: /* select_sublist */ - case 348: /* group_by_list */ - case 350: /* order_by_clause_opt */ - case 354: /* sort_specification_list */ + case 257: /* integer_list */ + case 258: /* variable_list */ + case 259: /* retention_list */ + case 263: /* column_def_list */ + case 264: /* tags_def_opt */ + case 266: /* multi_create_clause */ + case 267: /* tags_def */ + case 268: /* multi_drop_clause */ + case 275: /* specific_tags_opt */ + case 276: /* literal_list */ + case 278: /* col_name_list */ + case 281: /* func_name_list */ + case 292: /* func_list */ + case 296: /* expression_list */ + case 308: /* dnode_list */ + case 319: /* 
star_func_para_list */ + case 321: /* other_para_list */ + case 341: /* select_list */ + case 343: /* partition_by_clause_opt */ + case 345: /* group_by_clause_opt */ + case 347: /* select_sublist */ + case 351: /* group_by_list */ + case 353: /* order_by_clause_opt */ + case 357: /* sort_specification_list */ { - nodesDestroyList((yypminor->yy60)); + nodesDestroyList((yypminor->yy236)); } break; - case 258: /* alter_db_option */ - case 280: /* alter_table_option */ + case 260: /* alter_db_option */ + case 282: /* alter_table_option */ { } break; - case 270: /* type_name */ + case 272: /* type_name */ { } break; - case 321: /* compare_op */ - case 322: /* in_op */ + case 324: /* compare_op */ + case 325: /* in_op */ { } break; - case 334: /* join_type */ + case 337: /* join_type */ { } break; - case 347: /* fill_mode */ + case 350: /* fill_mode */ { } break; - case 356: /* ordering_specification_opt */ + case 359: /* ordering_specification_opt */ { } break; - case 357: /* null_ordering_opt */ + case 360: /* null_ordering_opt */ { } @@ -2559,458 +2571,461 @@ static const struct { YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */ signed char nrhs; /* Negative of the number of RHS symbols in the rule */ } yyRuleInfo[] = { - { 238, -6 }, /* (0) cmd ::= CREATE ACCOUNT NK_ID PASS NK_STRING account_options */ - { 238, -4 }, /* (1) cmd ::= ALTER ACCOUNT NK_ID alter_account_options */ - { 239, 0 }, /* (2) account_options ::= */ - { 239, -3 }, /* (3) account_options ::= account_options PPS literal */ - { 239, -3 }, /* (4) account_options ::= account_options TSERIES literal */ - { 239, -3 }, /* (5) account_options ::= account_options STORAGE literal */ - { 239, -3 }, /* (6) account_options ::= account_options STREAMS literal */ - { 239, -3 }, /* (7) account_options ::= account_options QTIME literal */ - { 239, -3 }, /* (8) account_options ::= account_options DBS literal */ - { 239, -3 }, /* (9) account_options ::= account_options USERS literal */ - { 239, -3 }, /* (10) account_options ::= account_options CONNS literal */ - { 239, -3 }, /* (11) account_options ::= account_options STATE literal */ - { 240, -1 }, /* (12) alter_account_options ::= alter_account_option */ - { 240, -2 }, /* (13) alter_account_options ::= alter_account_options alter_account_option */ - { 242, -2 }, /* (14) alter_account_option ::= PASS literal */ - { 242, -2 }, /* (15) alter_account_option ::= PPS literal */ - { 242, -2 }, /* (16) alter_account_option ::= TSERIES literal */ - { 242, -2 }, /* (17) alter_account_option ::= STORAGE literal */ - { 242, -2 }, /* (18) alter_account_option ::= STREAMS literal */ - { 242, -2 }, /* (19) alter_account_option ::= QTIME literal */ - { 242, -2 }, /* (20) alter_account_option ::= DBS literal */ - { 242, -2 }, /* (21) alter_account_option ::= USERS literal */ - { 242, -2 }, /* (22) alter_account_option ::= CONNS literal */ - { 242, -2 }, /* (23) alter_account_option ::= STATE literal */ - { 238, -5 }, /* (24) cmd ::= CREATE USER user_name PASS NK_STRING */ - { 238, -5 }, /* (25) cmd ::= ALTER USER user_name PASS NK_STRING */ - { 238, -5 }, /* (26) cmd ::= ALTER USER user_name PRIVILEGE NK_STRING */ - { 238, -3 }, /* (27) cmd ::= DROP USER user_name */ - { 238, -6 }, /* (28) cmd ::= GRANT privileges ON priv_level TO user_name */ - { 238, -6 }, /* (29) cmd ::= REVOKE privileges ON priv_level FROM user_name */ - { 244, -1 }, /* (30) privileges ::= ALL */ - { 244, -1 }, /* (31) privileges ::= priv_type_list */ - { 246, -1 }, /* (32) priv_type_list ::= priv_type */ - { 246, -3 }, 
/* (33) priv_type_list ::= priv_type_list NK_COMMA priv_type */ - { 247, -1 }, /* (34) priv_type ::= READ */ - { 247, -1 }, /* (35) priv_type ::= WRITE */ - { 245, -3 }, /* (36) priv_level ::= NK_STAR NK_DOT NK_STAR */ - { 245, -3 }, /* (37) priv_level ::= db_name NK_DOT NK_STAR */ - { 238, -3 }, /* (38) cmd ::= CREATE DNODE dnode_endpoint */ - { 238, -5 }, /* (39) cmd ::= CREATE DNODE dnode_host_name PORT NK_INTEGER */ - { 238, -3 }, /* (40) cmd ::= DROP DNODE NK_INTEGER */ - { 238, -3 }, /* (41) cmd ::= DROP DNODE dnode_endpoint */ - { 238, -4 }, /* (42) cmd ::= ALTER DNODE NK_INTEGER NK_STRING */ - { 238, -5 }, /* (43) cmd ::= ALTER DNODE NK_INTEGER NK_STRING NK_STRING */ - { 238, -4 }, /* (44) cmd ::= ALTER ALL DNODES NK_STRING */ - { 238, -5 }, /* (45) cmd ::= ALTER ALL DNODES NK_STRING NK_STRING */ - { 249, -1 }, /* (46) dnode_endpoint ::= NK_STRING */ - { 250, -1 }, /* (47) dnode_host_name ::= NK_ID */ - { 250, -1 }, /* (48) dnode_host_name ::= NK_IPTOKEN */ - { 238, -3 }, /* (49) cmd ::= ALTER LOCAL NK_STRING */ - { 238, -4 }, /* (50) cmd ::= ALTER LOCAL NK_STRING NK_STRING */ - { 238, -5 }, /* (51) cmd ::= CREATE QNODE ON DNODE NK_INTEGER */ - { 238, -5 }, /* (52) cmd ::= DROP QNODE ON DNODE NK_INTEGER */ - { 238, -5 }, /* (53) cmd ::= CREATE BNODE ON DNODE NK_INTEGER */ - { 238, -5 }, /* (54) cmd ::= DROP BNODE ON DNODE NK_INTEGER */ - { 238, -5 }, /* (55) cmd ::= CREATE SNODE ON DNODE NK_INTEGER */ - { 238, -5 }, /* (56) cmd ::= DROP SNODE ON DNODE NK_INTEGER */ - { 238, -5 }, /* (57) cmd ::= CREATE MNODE ON DNODE NK_INTEGER */ - { 238, -5 }, /* (58) cmd ::= DROP MNODE ON DNODE NK_INTEGER */ - { 238, -5 }, /* (59) cmd ::= CREATE DATABASE not_exists_opt db_name db_options */ - { 238, -4 }, /* (60) cmd ::= DROP DATABASE exists_opt db_name */ - { 238, -2 }, /* (61) cmd ::= USE db_name */ - { 238, -4 }, /* (62) cmd ::= ALTER DATABASE db_name alter_db_options */ - { 251, -3 }, /* (63) not_exists_opt ::= IF NOT EXISTS */ - { 251, 0 }, /* (64) not_exists_opt ::= */ - { 253, -2 }, /* (65) exists_opt ::= IF EXISTS */ - { 253, 0 }, /* (66) exists_opt ::= */ - { 252, 0 }, /* (67) db_options ::= */ - { 252, -3 }, /* (68) db_options ::= db_options BUFFER NK_INTEGER */ - { 252, -3 }, /* (69) db_options ::= db_options CACHELAST NK_INTEGER */ - { 252, -3 }, /* (70) db_options ::= db_options COMP NK_INTEGER */ - { 252, -3 }, /* (71) db_options ::= db_options DAYS NK_INTEGER */ - { 252, -3 }, /* (72) db_options ::= db_options DAYS NK_VARIABLE */ - { 252, -3 }, /* (73) db_options ::= db_options FSYNC NK_INTEGER */ - { 252, -3 }, /* (74) db_options ::= db_options MAXROWS NK_INTEGER */ - { 252, -3 }, /* (75) db_options ::= db_options MINROWS NK_INTEGER */ - { 252, -3 }, /* (76) db_options ::= db_options KEEP integer_list */ - { 252, -3 }, /* (77) db_options ::= db_options KEEP variable_list */ - { 252, -3 }, /* (78) db_options ::= db_options PAGES NK_INTEGER */ - { 252, -3 }, /* (79) db_options ::= db_options PAGESIZE NK_INTEGER */ - { 252, -3 }, /* (80) db_options ::= db_options PRECISION NK_STRING */ - { 252, -3 }, /* (81) db_options ::= db_options REPLICA NK_INTEGER */ - { 252, -3 }, /* (82) db_options ::= db_options STRICT NK_INTEGER */ - { 252, -3 }, /* (83) db_options ::= db_options WAL NK_INTEGER */ - { 252, -3 }, /* (84) db_options ::= db_options VGROUPS NK_INTEGER */ - { 252, -3 }, /* (85) db_options ::= db_options SINGLE_STABLE NK_INTEGER */ - { 252, -3 }, /* (86) db_options ::= db_options RETENTIONS retention_list */ - { 254, -1 }, /* (87) alter_db_options ::= alter_db_option */ - { 254, 
-2 }, /* (88) alter_db_options ::= alter_db_options alter_db_option */ - { 258, -2 }, /* (89) alter_db_option ::= BUFFER NK_INTEGER */ - { 258, -2 }, /* (90) alter_db_option ::= CACHELAST NK_INTEGER */ - { 258, -2 }, /* (91) alter_db_option ::= FSYNC NK_INTEGER */ - { 258, -2 }, /* (92) alter_db_option ::= KEEP integer_list */ - { 258, -2 }, /* (93) alter_db_option ::= KEEP variable_list */ - { 258, -2 }, /* (94) alter_db_option ::= PAGES NK_INTEGER */ - { 258, -2 }, /* (95) alter_db_option ::= REPLICA NK_INTEGER */ - { 258, -2 }, /* (96) alter_db_option ::= STRICT NK_INTEGER */ - { 258, -2 }, /* (97) alter_db_option ::= WAL NK_INTEGER */ - { 255, -1 }, /* (98) integer_list ::= NK_INTEGER */ - { 255, -3 }, /* (99) integer_list ::= integer_list NK_COMMA NK_INTEGER */ - { 256, -1 }, /* (100) variable_list ::= NK_VARIABLE */ - { 256, -3 }, /* (101) variable_list ::= variable_list NK_COMMA NK_VARIABLE */ - { 257, -1 }, /* (102) retention_list ::= retention */ - { 257, -3 }, /* (103) retention_list ::= retention_list NK_COMMA retention */ - { 259, -3 }, /* (104) retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */ - { 238, -9 }, /* (105) cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */ - { 238, -3 }, /* (106) cmd ::= CREATE TABLE multi_create_clause */ - { 238, -9 }, /* (107) cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */ - { 238, -3 }, /* (108) cmd ::= DROP TABLE multi_drop_clause */ - { 238, -4 }, /* (109) cmd ::= DROP STABLE exists_opt full_table_name */ - { 238, -3 }, /* (110) cmd ::= ALTER TABLE alter_table_clause */ - { 238, -3 }, /* (111) cmd ::= ALTER STABLE alter_table_clause */ - { 267, -2 }, /* (112) alter_table_clause ::= full_table_name alter_table_options */ - { 267, -5 }, /* (113) alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */ - { 267, -4 }, /* (114) alter_table_clause ::= full_table_name DROP COLUMN column_name */ - { 267, -5 }, /* (115) alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */ - { 267, -5 }, /* (116) alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */ - { 267, -5 }, /* (117) alter_table_clause ::= full_table_name ADD TAG column_name type_name */ - { 267, -4 }, /* (118) alter_table_clause ::= full_table_name DROP TAG column_name */ - { 267, -5 }, /* (119) alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */ - { 267, -5 }, /* (120) alter_table_clause ::= full_table_name RENAME TAG column_name column_name */ - { 267, -6 }, /* (121) alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */ - { 264, -1 }, /* (122) multi_create_clause ::= create_subtable_clause */ - { 264, -2 }, /* (123) multi_create_clause ::= multi_create_clause create_subtable_clause */ - { 272, -10 }, /* (124) create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_tags_opt TAGS NK_LP literal_list NK_RP table_options */ - { 266, -1 }, /* (125) multi_drop_clause ::= drop_table_clause */ - { 266, -2 }, /* (126) multi_drop_clause ::= multi_drop_clause drop_table_clause */ - { 275, -2 }, /* (127) drop_table_clause ::= exists_opt full_table_name */ - { 273, 0 }, /* (128) specific_tags_opt ::= */ - { 273, -3 }, /* (129) specific_tags_opt ::= NK_LP col_name_list NK_RP */ - { 260, -1 }, /* (130) full_table_name ::= table_name */ - { 260, -3 }, /* (131) full_table_name ::= db_name NK_DOT table_name */ - { 261, -1 }, /* (132) 
column_def_list ::= column_def */ - { 261, -3 }, /* (133) column_def_list ::= column_def_list NK_COMMA column_def */ - { 278, -2 }, /* (134) column_def ::= column_name type_name */ - { 278, -4 }, /* (135) column_def ::= column_name type_name COMMENT NK_STRING */ - { 270, -1 }, /* (136) type_name ::= BOOL */ - { 270, -1 }, /* (137) type_name ::= TINYINT */ - { 270, -1 }, /* (138) type_name ::= SMALLINT */ - { 270, -1 }, /* (139) type_name ::= INT */ - { 270, -1 }, /* (140) type_name ::= INTEGER */ - { 270, -1 }, /* (141) type_name ::= BIGINT */ - { 270, -1 }, /* (142) type_name ::= FLOAT */ - { 270, -1 }, /* (143) type_name ::= DOUBLE */ - { 270, -4 }, /* (144) type_name ::= BINARY NK_LP NK_INTEGER NK_RP */ - { 270, -1 }, /* (145) type_name ::= TIMESTAMP */ - { 270, -4 }, /* (146) type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */ - { 270, -2 }, /* (147) type_name ::= TINYINT UNSIGNED */ - { 270, -2 }, /* (148) type_name ::= SMALLINT UNSIGNED */ - { 270, -2 }, /* (149) type_name ::= INT UNSIGNED */ - { 270, -2 }, /* (150) type_name ::= BIGINT UNSIGNED */ - { 270, -1 }, /* (151) type_name ::= JSON */ - { 270, -4 }, /* (152) type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */ - { 270, -1 }, /* (153) type_name ::= MEDIUMBLOB */ - { 270, -1 }, /* (154) type_name ::= BLOB */ - { 270, -4 }, /* (155) type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */ - { 270, -1 }, /* (156) type_name ::= DECIMAL */ - { 270, -4 }, /* (157) type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */ - { 270, -6 }, /* (158) type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */ - { 262, 0 }, /* (159) tags_def_opt ::= */ - { 262, -1 }, /* (160) tags_def_opt ::= tags_def */ - { 265, -4 }, /* (161) tags_def ::= TAGS NK_LP column_def_list NK_RP */ - { 263, 0 }, /* (162) table_options ::= */ - { 263, -3 }, /* (163) table_options ::= table_options COMMENT NK_STRING */ - { 263, -3 }, /* (164) table_options ::= table_options DELAY NK_INTEGER */ - { 263, -3 }, /* (165) table_options ::= table_options FILE_FACTOR NK_FLOAT */ - { 263, -5 }, /* (166) table_options ::= table_options ROLLUP NK_LP func_name_list NK_RP */ - { 263, -3 }, /* (167) table_options ::= table_options TTL NK_INTEGER */ - { 263, -5 }, /* (168) table_options ::= table_options SMA NK_LP col_name_list NK_RP */ - { 268, -1 }, /* (169) alter_table_options ::= alter_table_option */ - { 268, -2 }, /* (170) alter_table_options ::= alter_table_options alter_table_option */ - { 280, -2 }, /* (171) alter_table_option ::= COMMENT NK_STRING */ - { 280, -2 }, /* (172) alter_table_option ::= TTL NK_INTEGER */ - { 276, -1 }, /* (173) col_name_list ::= col_name */ - { 276, -3 }, /* (174) col_name_list ::= col_name_list NK_COMMA col_name */ - { 281, -1 }, /* (175) col_name ::= column_name */ - { 238, -2 }, /* (176) cmd ::= SHOW DNODES */ - { 238, -2 }, /* (177) cmd ::= SHOW USERS */ - { 238, -2 }, /* (178) cmd ::= SHOW DATABASES */ - { 238, -4 }, /* (179) cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */ - { 238, -4 }, /* (180) cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */ - { 238, -3 }, /* (181) cmd ::= SHOW db_name_cond_opt VGROUPS */ - { 238, -2 }, /* (182) cmd ::= SHOW MNODES */ - { 238, -2 }, /* (183) cmd ::= SHOW MODULES */ - { 238, -2 }, /* (184) cmd ::= SHOW QNODES */ - { 238, -2 }, /* (185) cmd ::= SHOW FUNCTIONS */ - { 238, -5 }, /* (186) cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */ - { 238, -2 }, /* (187) cmd ::= SHOW STREAMS */ - { 238, -2 }, /* (188) cmd ::= SHOW ACCOUNTS */ - { 238, -2 }, /* (189) cmd ::= SHOW APPS */ - { 238, -2 }, /* 
(190) cmd ::= SHOW CONNECTIONS */ - { 238, -2 }, /* (191) cmd ::= SHOW LICENCE */ - { 238, -2 }, /* (192) cmd ::= SHOW GRANTS */ - { 238, -4 }, /* (193) cmd ::= SHOW CREATE DATABASE db_name */ - { 238, -4 }, /* (194) cmd ::= SHOW CREATE TABLE full_table_name */ - { 238, -4 }, /* (195) cmd ::= SHOW CREATE STABLE full_table_name */ - { 238, -2 }, /* (196) cmd ::= SHOW QUERIES */ - { 238, -2 }, /* (197) cmd ::= SHOW SCORES */ - { 238, -2 }, /* (198) cmd ::= SHOW TOPICS */ - { 238, -2 }, /* (199) cmd ::= SHOW VARIABLES */ - { 238, -2 }, /* (200) cmd ::= SHOW BNODES */ - { 238, -2 }, /* (201) cmd ::= SHOW SNODES */ - { 238, -2 }, /* (202) cmd ::= SHOW CLUSTER */ - { 238, -2 }, /* (203) cmd ::= SHOW TRANSACTIONS */ - { 282, 0 }, /* (204) db_name_cond_opt ::= */ - { 282, -2 }, /* (205) db_name_cond_opt ::= db_name NK_DOT */ - { 283, 0 }, /* (206) like_pattern_opt ::= */ - { 283, -2 }, /* (207) like_pattern_opt ::= LIKE NK_STRING */ - { 284, -1 }, /* (208) table_name_cond ::= table_name */ - { 285, 0 }, /* (209) from_db_opt ::= */ - { 285, -2 }, /* (210) from_db_opt ::= FROM db_name */ - { 279, -1 }, /* (211) func_name_list ::= func_name */ - { 279, -3 }, /* (212) func_name_list ::= func_name_list NK_COMMA func_name */ - { 286, -1 }, /* (213) func_name ::= function_name */ - { 238, -8 }, /* (214) cmd ::= CREATE SMA INDEX not_exists_opt index_name ON table_name index_options */ - { 238, -10 }, /* (215) cmd ::= CREATE FULLTEXT INDEX not_exists_opt index_name ON table_name NK_LP col_name_list NK_RP */ - { 238, -6 }, /* (216) cmd ::= DROP INDEX exists_opt index_name ON table_name */ - { 289, 0 }, /* (217) index_options ::= */ - { 289, -9 }, /* (218) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt */ - { 289, -11 }, /* (219) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt */ - { 290, -1 }, /* (220) func_list ::= func */ - { 290, -3 }, /* (221) func_list ::= func_list NK_COMMA func */ - { 293, -4 }, /* (222) func ::= function_name NK_LP expression_list NK_RP */ - { 238, -7 }, /* (223) cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS query_expression */ - { 238, -7 }, /* (224) cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS db_name */ - { 238, -4 }, /* (225) cmd ::= DROP TOPIC exists_opt topic_name */ - { 296, 0 }, /* (226) topic_options ::= */ - { 296, -3 }, /* (227) topic_options ::= topic_options WITH TABLE */ - { 296, -3 }, /* (228) topic_options ::= topic_options WITH SCHEMA */ - { 296, -3 }, /* (229) topic_options ::= topic_options WITH TAG */ - { 238, -2 }, /* (230) cmd ::= DESC full_table_name */ - { 238, -2 }, /* (231) cmd ::= DESCRIBE full_table_name */ - { 238, -3 }, /* (232) cmd ::= RESET QUERY CACHE */ - { 238, -4 }, /* (233) cmd ::= EXPLAIN analyze_opt explain_options query_expression */ - { 298, 0 }, /* (234) analyze_opt ::= */ - { 298, -1 }, /* (235) analyze_opt ::= ANALYZE */ - { 299, 0 }, /* (236) explain_options ::= */ - { 299, -3 }, /* (237) explain_options ::= explain_options VERBOSE NK_BOOL */ - { 299, -3 }, /* (238) explain_options ::= explain_options RATIO NK_FLOAT */ - { 238, -6 }, /* (239) cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP */ - { 238, -10 }, /* (240) cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */ - { 238, -4 }, /* (241) cmd ::= DROP FUNCTION exists_opt function_name */ - { 300, 0 }, /* (242) agg_func_opt ::= */ - { 300, -1 }, /* (243) 
agg_func_opt ::= AGGREGATE */ - { 301, 0 }, /* (244) bufsize_opt ::= */ - { 301, -2 }, /* (245) bufsize_opt ::= BUFSIZE NK_INTEGER */ - { 238, -8 }, /* (246) cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression */ - { 238, -4 }, /* (247) cmd ::= DROP STREAM exists_opt stream_name */ - { 304, 0 }, /* (248) into_opt ::= */ - { 304, -2 }, /* (249) into_opt ::= INTO full_table_name */ - { 303, 0 }, /* (250) stream_options ::= */ - { 303, -3 }, /* (251) stream_options ::= stream_options TRIGGER AT_ONCE */ - { 303, -3 }, /* (252) stream_options ::= stream_options TRIGGER WINDOW_CLOSE */ - { 303, -3 }, /* (253) stream_options ::= stream_options WATERMARK duration_literal */ - { 238, -3 }, /* (254) cmd ::= KILL CONNECTION NK_INTEGER */ - { 238, -3 }, /* (255) cmd ::= KILL QUERY NK_INTEGER */ - { 238, -3 }, /* (256) cmd ::= KILL TRANSACTION NK_INTEGER */ - { 238, -4 }, /* (257) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ - { 238, -4 }, /* (258) cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ - { 238, -3 }, /* (259) cmd ::= SPLIT VGROUP NK_INTEGER */ - { 305, -2 }, /* (260) dnode_list ::= DNODE NK_INTEGER */ - { 305, -3 }, /* (261) dnode_list ::= dnode_list DNODE NK_INTEGER */ - { 238, -3 }, /* (262) cmd ::= SYNCDB db_name REPLICA */ - { 238, -1 }, /* (263) cmd ::= query_expression */ - { 241, -1 }, /* (264) literal ::= NK_INTEGER */ - { 241, -1 }, /* (265) literal ::= NK_FLOAT */ - { 241, -1 }, /* (266) literal ::= NK_STRING */ - { 241, -1 }, /* (267) literal ::= NK_BOOL */ - { 241, -2 }, /* (268) literal ::= TIMESTAMP NK_STRING */ - { 241, -1 }, /* (269) literal ::= duration_literal */ - { 241, -1 }, /* (270) literal ::= NULL */ - { 241, -1 }, /* (271) literal ::= NK_QUESTION */ - { 291, -1 }, /* (272) duration_literal ::= NK_VARIABLE */ - { 306, -1 }, /* (273) signed ::= NK_INTEGER */ - { 306, -2 }, /* (274) signed ::= NK_PLUS NK_INTEGER */ - { 306, -2 }, /* (275) signed ::= NK_MINUS NK_INTEGER */ - { 306, -1 }, /* (276) signed ::= NK_FLOAT */ - { 306, -2 }, /* (277) signed ::= NK_PLUS NK_FLOAT */ - { 306, -2 }, /* (278) signed ::= NK_MINUS NK_FLOAT */ - { 271, -1 }, /* (279) signed_literal ::= signed */ - { 271, -1 }, /* (280) signed_literal ::= NK_STRING */ - { 271, -1 }, /* (281) signed_literal ::= NK_BOOL */ - { 271, -2 }, /* (282) signed_literal ::= TIMESTAMP NK_STRING */ - { 271, -1 }, /* (283) signed_literal ::= duration_literal */ - { 271, -1 }, /* (284) signed_literal ::= NULL */ - { 271, -1 }, /* (285) signed_literal ::= literal_func */ - { 274, -1 }, /* (286) literal_list ::= signed_literal */ - { 274, -3 }, /* (287) literal_list ::= literal_list NK_COMMA signed_literal */ - { 248, -1 }, /* (288) db_name ::= NK_ID */ - { 277, -1 }, /* (289) table_name ::= NK_ID */ - { 269, -1 }, /* (290) column_name ::= NK_ID */ - { 287, -1 }, /* (291) function_name ::= NK_ID */ - { 308, -1 }, /* (292) table_alias ::= NK_ID */ - { 309, -1 }, /* (293) column_alias ::= NK_ID */ - { 243, -1 }, /* (294) user_name ::= NK_ID */ - { 288, -1 }, /* (295) index_name ::= NK_ID */ - { 295, -1 }, /* (296) topic_name ::= NK_ID */ - { 302, -1 }, /* (297) stream_name ::= NK_ID */ - { 310, -1 }, /* (298) expression ::= literal */ - { 310, -1 }, /* (299) expression ::= pseudo_column */ - { 310, -1 }, /* (300) expression ::= column_reference */ - { 310, -1 }, /* (301) expression ::= function_expression */ - { 310, -1 }, /* (302) expression ::= subquery */ - { 310, -3 }, /* (303) expression ::= NK_LP expression NK_RP */ - { 310, -2 }, /* (304) expression ::= NK_PLUS 
expression */ - { 310, -2 }, /* (305) expression ::= NK_MINUS expression */ - { 310, -3 }, /* (306) expression ::= expression NK_PLUS expression */ - { 310, -3 }, /* (307) expression ::= expression NK_MINUS expression */ - { 310, -3 }, /* (308) expression ::= expression NK_STAR expression */ - { 310, -3 }, /* (309) expression ::= expression NK_SLASH expression */ - { 310, -3 }, /* (310) expression ::= expression NK_REM expression */ - { 310, -3 }, /* (311) expression ::= column_reference NK_ARROW NK_STRING */ - { 294, -1 }, /* (312) expression_list ::= expression */ - { 294, -3 }, /* (313) expression_list ::= expression_list NK_COMMA expression */ - { 312, -1 }, /* (314) column_reference ::= column_name */ - { 312, -3 }, /* (315) column_reference ::= table_name NK_DOT column_name */ - { 311, -1 }, /* (316) pseudo_column ::= ROWTS */ - { 311, -1 }, /* (317) pseudo_column ::= TBNAME */ - { 311, -3 }, /* (318) pseudo_column ::= table_name NK_DOT TBNAME */ - { 311, -1 }, /* (319) pseudo_column ::= QSTARTTS */ - { 311, -1 }, /* (320) pseudo_column ::= QENDTS */ - { 311, -1 }, /* (321) pseudo_column ::= WSTARTTS */ - { 311, -1 }, /* (322) pseudo_column ::= WENDTS */ - { 311, -1 }, /* (323) pseudo_column ::= WDURATION */ - { 313, -4 }, /* (324) function_expression ::= function_name NK_LP expression_list NK_RP */ - { 313, -4 }, /* (325) function_expression ::= star_func NK_LP star_func_para_list NK_RP */ - { 313, -6 }, /* (326) function_expression ::= CAST NK_LP expression AS type_name NK_RP */ - { 313, -1 }, /* (327) function_expression ::= literal_func */ - { 307, -3 }, /* (328) literal_func ::= noarg_func NK_LP NK_RP */ - { 307, -1 }, /* (329) literal_func ::= NOW */ - { 317, -1 }, /* (330) noarg_func ::= NOW */ - { 317, -1 }, /* (331) noarg_func ::= TODAY */ - { 317, -1 }, /* (332) noarg_func ::= TIMEZONE */ - { 315, -1 }, /* (333) star_func ::= COUNT */ - { 315, -1 }, /* (334) star_func ::= FIRST */ - { 315, -1 }, /* (335) star_func ::= LAST */ - { 315, -1 }, /* (336) star_func ::= LAST_ROW */ - { 316, -1 }, /* (337) star_func_para_list ::= NK_STAR */ - { 316, -1 }, /* (338) star_func_para_list ::= other_para_list */ - { 318, -1 }, /* (339) other_para_list ::= star_func_para */ - { 318, -3 }, /* (340) other_para_list ::= other_para_list NK_COMMA star_func_para */ - { 319, -1 }, /* (341) star_func_para ::= expression */ - { 319, -3 }, /* (342) star_func_para ::= table_name NK_DOT NK_STAR */ - { 320, -3 }, /* (343) predicate ::= expression compare_op expression */ - { 320, -5 }, /* (344) predicate ::= expression BETWEEN expression AND expression */ - { 320, -6 }, /* (345) predicate ::= expression NOT BETWEEN expression AND expression */ - { 320, -3 }, /* (346) predicate ::= expression IS NULL */ - { 320, -4 }, /* (347) predicate ::= expression IS NOT NULL */ - { 320, -3 }, /* (348) predicate ::= expression in_op in_predicate_value */ - { 321, -1 }, /* (349) compare_op ::= NK_LT */ - { 321, -1 }, /* (350) compare_op ::= NK_GT */ - { 321, -1 }, /* (351) compare_op ::= NK_LE */ - { 321, -1 }, /* (352) compare_op ::= NK_GE */ - { 321, -1 }, /* (353) compare_op ::= NK_NE */ - { 321, -1 }, /* (354) compare_op ::= NK_EQ */ - { 321, -1 }, /* (355) compare_op ::= LIKE */ - { 321, -2 }, /* (356) compare_op ::= NOT LIKE */ - { 321, -1 }, /* (357) compare_op ::= MATCH */ - { 321, -1 }, /* (358) compare_op ::= NMATCH */ - { 321, -1 }, /* (359) compare_op ::= CONTAINS */ - { 322, -1 }, /* (360) in_op ::= IN */ - { 322, -2 }, /* (361) in_op ::= NOT IN */ - { 323, -3 }, /* (362) in_predicate_value ::= NK_LP 
expression_list NK_RP */ - { 324, -1 }, /* (363) boolean_value_expression ::= boolean_primary */ - { 324, -2 }, /* (364) boolean_value_expression ::= NOT boolean_primary */ - { 324, -3 }, /* (365) boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ - { 324, -3 }, /* (366) boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ - { 325, -1 }, /* (367) boolean_primary ::= predicate */ - { 325, -3 }, /* (368) boolean_primary ::= NK_LP boolean_value_expression NK_RP */ - { 326, -1 }, /* (369) common_expression ::= expression */ - { 326, -1 }, /* (370) common_expression ::= boolean_value_expression */ - { 327, -2 }, /* (371) from_clause ::= FROM table_reference_list */ - { 328, -1 }, /* (372) table_reference_list ::= table_reference */ - { 328, -3 }, /* (373) table_reference_list ::= table_reference_list NK_COMMA table_reference */ - { 329, -1 }, /* (374) table_reference ::= table_primary */ - { 329, -1 }, /* (375) table_reference ::= joined_table */ - { 330, -2 }, /* (376) table_primary ::= table_name alias_opt */ - { 330, -4 }, /* (377) table_primary ::= db_name NK_DOT table_name alias_opt */ - { 330, -2 }, /* (378) table_primary ::= subquery alias_opt */ - { 330, -1 }, /* (379) table_primary ::= parenthesized_joined_table */ - { 332, 0 }, /* (380) alias_opt ::= */ - { 332, -1 }, /* (381) alias_opt ::= table_alias */ - { 332, -2 }, /* (382) alias_opt ::= AS table_alias */ - { 333, -3 }, /* (383) parenthesized_joined_table ::= NK_LP joined_table NK_RP */ - { 333, -3 }, /* (384) parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ - { 331, -6 }, /* (385) joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ - { 334, 0 }, /* (386) join_type ::= */ - { 334, -1 }, /* (387) join_type ::= INNER */ - { 336, -9 }, /* (388) query_specification ::= SELECT set_quantifier_opt select_list from_clause where_clause_opt partition_by_clause_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ - { 337, 0 }, /* (389) set_quantifier_opt ::= */ - { 337, -1 }, /* (390) set_quantifier_opt ::= DISTINCT */ - { 337, -1 }, /* (391) set_quantifier_opt ::= ALL */ - { 338, -1 }, /* (392) select_list ::= NK_STAR */ - { 338, -1 }, /* (393) select_list ::= select_sublist */ - { 344, -1 }, /* (394) select_sublist ::= select_item */ - { 344, -3 }, /* (395) select_sublist ::= select_sublist NK_COMMA select_item */ - { 345, -1 }, /* (396) select_item ::= common_expression */ - { 345, -2 }, /* (397) select_item ::= common_expression column_alias */ - { 345, -3 }, /* (398) select_item ::= common_expression AS column_alias */ - { 345, -3 }, /* (399) select_item ::= table_name NK_DOT NK_STAR */ - { 339, 0 }, /* (400) where_clause_opt ::= */ - { 339, -2 }, /* (401) where_clause_opt ::= WHERE search_condition */ - { 340, 0 }, /* (402) partition_by_clause_opt ::= */ - { 340, -3 }, /* (403) partition_by_clause_opt ::= PARTITION BY expression_list */ - { 341, 0 }, /* (404) twindow_clause_opt ::= */ - { 341, -6 }, /* (405) twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ - { 341, -4 }, /* (406) twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */ - { 341, -6 }, /* (407) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ - { 341, -8 }, /* (408) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ - { 292, 0 }, /* (409) sliding_opt ::= */ - { 292, -4 }, /* (410) sliding_opt ::= 
SLIDING NK_LP duration_literal NK_RP */ - { 346, 0 }, /* (411) fill_opt ::= */ - { 346, -4 }, /* (412) fill_opt ::= FILL NK_LP fill_mode NK_RP */ - { 346, -6 }, /* (413) fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ - { 347, -1 }, /* (414) fill_mode ::= NONE */ - { 347, -1 }, /* (415) fill_mode ::= PREV */ - { 347, -1 }, /* (416) fill_mode ::= NULL */ - { 347, -1 }, /* (417) fill_mode ::= LINEAR */ - { 347, -1 }, /* (418) fill_mode ::= NEXT */ - { 342, 0 }, /* (419) group_by_clause_opt ::= */ - { 342, -3 }, /* (420) group_by_clause_opt ::= GROUP BY group_by_list */ - { 348, -1 }, /* (421) group_by_list ::= expression */ - { 348, -3 }, /* (422) group_by_list ::= group_by_list NK_COMMA expression */ - { 343, 0 }, /* (423) having_clause_opt ::= */ - { 343, -2 }, /* (424) having_clause_opt ::= HAVING search_condition */ - { 297, -4 }, /* (425) query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */ - { 349, -1 }, /* (426) query_expression_body ::= query_primary */ - { 349, -4 }, /* (427) query_expression_body ::= query_expression_body UNION ALL query_expression_body */ - { 349, -3 }, /* (428) query_expression_body ::= query_expression_body UNION query_expression_body */ - { 353, -1 }, /* (429) query_primary ::= query_specification */ - { 353, -6 }, /* (430) query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */ - { 350, 0 }, /* (431) order_by_clause_opt ::= */ - { 350, -3 }, /* (432) order_by_clause_opt ::= ORDER BY sort_specification_list */ - { 351, 0 }, /* (433) slimit_clause_opt ::= */ - { 351, -2 }, /* (434) slimit_clause_opt ::= SLIMIT NK_INTEGER */ - { 351, -4 }, /* (435) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ - { 351, -4 }, /* (436) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ - { 352, 0 }, /* (437) limit_clause_opt ::= */ - { 352, -2 }, /* (438) limit_clause_opt ::= LIMIT NK_INTEGER */ - { 352, -4 }, /* (439) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ - { 352, -4 }, /* (440) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ - { 314, -3 }, /* (441) subquery ::= NK_LP query_expression NK_RP */ - { 335, -1 }, /* (442) search_condition ::= common_expression */ - { 354, -1 }, /* (443) sort_specification_list ::= sort_specification */ - { 354, -3 }, /* (444) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ - { 355, -3 }, /* (445) sort_specification ::= expression ordering_specification_opt null_ordering_opt */ - { 356, 0 }, /* (446) ordering_specification_opt ::= */ - { 356, -1 }, /* (447) ordering_specification_opt ::= ASC */ - { 356, -1 }, /* (448) ordering_specification_opt ::= DESC */ - { 357, 0 }, /* (449) null_ordering_opt ::= */ - { 357, -2 }, /* (450) null_ordering_opt ::= NULLS FIRST */ - { 357, -2 }, /* (451) null_ordering_opt ::= NULLS LAST */ + { 240, -6 }, /* (0) cmd ::= CREATE ACCOUNT NK_ID PASS NK_STRING account_options */ + { 240, -4 }, /* (1) cmd ::= ALTER ACCOUNT NK_ID alter_account_options */ + { 241, 0 }, /* (2) account_options ::= */ + { 241, -3 }, /* (3) account_options ::= account_options PPS literal */ + { 241, -3 }, /* (4) account_options ::= account_options TSERIES literal */ + { 241, -3 }, /* (5) account_options ::= account_options STORAGE literal */ + { 241, -3 }, /* (6) account_options ::= account_options STREAMS literal */ + { 241, -3 }, /* (7) account_options ::= account_options QTIME literal */ + { 241, -3 }, /* (8) account_options ::= 
account_options DBS literal */ + { 241, -3 }, /* (9) account_options ::= account_options USERS literal */ + { 241, -3 }, /* (10) account_options ::= account_options CONNS literal */ + { 241, -3 }, /* (11) account_options ::= account_options STATE literal */ + { 242, -1 }, /* (12) alter_account_options ::= alter_account_option */ + { 242, -2 }, /* (13) alter_account_options ::= alter_account_options alter_account_option */ + { 244, -2 }, /* (14) alter_account_option ::= PASS literal */ + { 244, -2 }, /* (15) alter_account_option ::= PPS literal */ + { 244, -2 }, /* (16) alter_account_option ::= TSERIES literal */ + { 244, -2 }, /* (17) alter_account_option ::= STORAGE literal */ + { 244, -2 }, /* (18) alter_account_option ::= STREAMS literal */ + { 244, -2 }, /* (19) alter_account_option ::= QTIME literal */ + { 244, -2 }, /* (20) alter_account_option ::= DBS literal */ + { 244, -2 }, /* (21) alter_account_option ::= USERS literal */ + { 244, -2 }, /* (22) alter_account_option ::= CONNS literal */ + { 244, -2 }, /* (23) alter_account_option ::= STATE literal */ + { 240, -5 }, /* (24) cmd ::= CREATE USER user_name PASS NK_STRING */ + { 240, -5 }, /* (25) cmd ::= ALTER USER user_name PASS NK_STRING */ + { 240, -5 }, /* (26) cmd ::= ALTER USER user_name PRIVILEGE NK_STRING */ + { 240, -3 }, /* (27) cmd ::= DROP USER user_name */ + { 240, -6 }, /* (28) cmd ::= GRANT privileges ON priv_level TO user_name */ + { 240, -6 }, /* (29) cmd ::= REVOKE privileges ON priv_level FROM user_name */ + { 246, -1 }, /* (30) privileges ::= ALL */ + { 246, -1 }, /* (31) privileges ::= priv_type_list */ + { 248, -1 }, /* (32) priv_type_list ::= priv_type */ + { 248, -3 }, /* (33) priv_type_list ::= priv_type_list NK_COMMA priv_type */ + { 249, -1 }, /* (34) priv_type ::= READ */ + { 249, -1 }, /* (35) priv_type ::= WRITE */ + { 247, -3 }, /* (36) priv_level ::= NK_STAR NK_DOT NK_STAR */ + { 247, -3 }, /* (37) priv_level ::= db_name NK_DOT NK_STAR */ + { 240, -3 }, /* (38) cmd ::= CREATE DNODE dnode_endpoint */ + { 240, -5 }, /* (39) cmd ::= CREATE DNODE dnode_host_name PORT NK_INTEGER */ + { 240, -3 }, /* (40) cmd ::= DROP DNODE NK_INTEGER */ + { 240, -3 }, /* (41) cmd ::= DROP DNODE dnode_endpoint */ + { 240, -4 }, /* (42) cmd ::= ALTER DNODE NK_INTEGER NK_STRING */ + { 240, -5 }, /* (43) cmd ::= ALTER DNODE NK_INTEGER NK_STRING NK_STRING */ + { 240, -4 }, /* (44) cmd ::= ALTER ALL DNODES NK_STRING */ + { 240, -5 }, /* (45) cmd ::= ALTER ALL DNODES NK_STRING NK_STRING */ + { 251, -1 }, /* (46) dnode_endpoint ::= NK_STRING */ + { 252, -1 }, /* (47) dnode_host_name ::= NK_ID */ + { 252, -1 }, /* (48) dnode_host_name ::= NK_IPTOKEN */ + { 240, -3 }, /* (49) cmd ::= ALTER LOCAL NK_STRING */ + { 240, -4 }, /* (50) cmd ::= ALTER LOCAL NK_STRING NK_STRING */ + { 240, -5 }, /* (51) cmd ::= CREATE QNODE ON DNODE NK_INTEGER */ + { 240, -5 }, /* (52) cmd ::= DROP QNODE ON DNODE NK_INTEGER */ + { 240, -5 }, /* (53) cmd ::= CREATE BNODE ON DNODE NK_INTEGER */ + { 240, -5 }, /* (54) cmd ::= DROP BNODE ON DNODE NK_INTEGER */ + { 240, -5 }, /* (55) cmd ::= CREATE SNODE ON DNODE NK_INTEGER */ + { 240, -5 }, /* (56) cmd ::= DROP SNODE ON DNODE NK_INTEGER */ + { 240, -5 }, /* (57) cmd ::= CREATE MNODE ON DNODE NK_INTEGER */ + { 240, -5 }, /* (58) cmd ::= DROP MNODE ON DNODE NK_INTEGER */ + { 240, -5 }, /* (59) cmd ::= CREATE DATABASE not_exists_opt db_name db_options */ + { 240, -4 }, /* (60) cmd ::= DROP DATABASE exists_opt db_name */ + { 240, -2 }, /* (61) cmd ::= USE db_name */ + { 240, -4 }, /* (62) cmd ::= ALTER DATABASE 
db_name alter_db_options */ + { 253, -3 }, /* (63) not_exists_opt ::= IF NOT EXISTS */ + { 253, 0 }, /* (64) not_exists_opt ::= */ + { 255, -2 }, /* (65) exists_opt ::= IF EXISTS */ + { 255, 0 }, /* (66) exists_opt ::= */ + { 254, 0 }, /* (67) db_options ::= */ + { 254, -3 }, /* (68) db_options ::= db_options BUFFER NK_INTEGER */ + { 254, -3 }, /* (69) db_options ::= db_options CACHELAST NK_INTEGER */ + { 254, -3 }, /* (70) db_options ::= db_options COMP NK_INTEGER */ + { 254, -3 }, /* (71) db_options ::= db_options DAYS NK_INTEGER */ + { 254, -3 }, /* (72) db_options ::= db_options DAYS NK_VARIABLE */ + { 254, -3 }, /* (73) db_options ::= db_options FSYNC NK_INTEGER */ + { 254, -3 }, /* (74) db_options ::= db_options MAXROWS NK_INTEGER */ + { 254, -3 }, /* (75) db_options ::= db_options MINROWS NK_INTEGER */ + { 254, -3 }, /* (76) db_options ::= db_options KEEP integer_list */ + { 254, -3 }, /* (77) db_options ::= db_options KEEP variable_list */ + { 254, -3 }, /* (78) db_options ::= db_options PAGES NK_INTEGER */ + { 254, -3 }, /* (79) db_options ::= db_options PAGESIZE NK_INTEGER */ + { 254, -3 }, /* (80) db_options ::= db_options PRECISION NK_STRING */ + { 254, -3 }, /* (81) db_options ::= db_options REPLICA NK_INTEGER */ + { 254, -3 }, /* (82) db_options ::= db_options STRICT NK_INTEGER */ + { 254, -3 }, /* (83) db_options ::= db_options WAL NK_INTEGER */ + { 254, -3 }, /* (84) db_options ::= db_options VGROUPS NK_INTEGER */ + { 254, -3 }, /* (85) db_options ::= db_options SINGLE_STABLE NK_INTEGER */ + { 254, -3 }, /* (86) db_options ::= db_options RETENTIONS retention_list */ + { 254, -3 }, /* (87) db_options ::= db_options SCHEMALESS NK_INTEGER */ + { 256, -1 }, /* (88) alter_db_options ::= alter_db_option */ + { 256, -2 }, /* (89) alter_db_options ::= alter_db_options alter_db_option */ + { 260, -2 }, /* (90) alter_db_option ::= BUFFER NK_INTEGER */ + { 260, -2 }, /* (91) alter_db_option ::= CACHELAST NK_INTEGER */ + { 260, -2 }, /* (92) alter_db_option ::= FSYNC NK_INTEGER */ + { 260, -2 }, /* (93) alter_db_option ::= KEEP integer_list */ + { 260, -2 }, /* (94) alter_db_option ::= KEEP variable_list */ + { 260, -2 }, /* (95) alter_db_option ::= PAGES NK_INTEGER */ + { 260, -2 }, /* (96) alter_db_option ::= REPLICA NK_INTEGER */ + { 260, -2 }, /* (97) alter_db_option ::= STRICT NK_INTEGER */ + { 260, -2 }, /* (98) alter_db_option ::= WAL NK_INTEGER */ + { 257, -1 }, /* (99) integer_list ::= NK_INTEGER */ + { 257, -3 }, /* (100) integer_list ::= integer_list NK_COMMA NK_INTEGER */ + { 258, -1 }, /* (101) variable_list ::= NK_VARIABLE */ + { 258, -3 }, /* (102) variable_list ::= variable_list NK_COMMA NK_VARIABLE */ + { 259, -1 }, /* (103) retention_list ::= retention */ + { 259, -3 }, /* (104) retention_list ::= retention_list NK_COMMA retention */ + { 261, -3 }, /* (105) retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */ + { 240, -9 }, /* (106) cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */ + { 240, -3 }, /* (107) cmd ::= CREATE TABLE multi_create_clause */ + { 240, -9 }, /* (108) cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */ + { 240, -3 }, /* (109) cmd ::= DROP TABLE multi_drop_clause */ + { 240, -4 }, /* (110) cmd ::= DROP STABLE exists_opt full_table_name */ + { 240, -3 }, /* (111) cmd ::= ALTER TABLE alter_table_clause */ + { 240, -3 }, /* (112) cmd ::= ALTER STABLE alter_table_clause */ + { 269, -2 }, /* (113) alter_table_clause ::= full_table_name 
alter_table_options */ + { 269, -5 }, /* (114) alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */ + { 269, -4 }, /* (115) alter_table_clause ::= full_table_name DROP COLUMN column_name */ + { 269, -5 }, /* (116) alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */ + { 269, -5 }, /* (117) alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */ + { 269, -5 }, /* (118) alter_table_clause ::= full_table_name ADD TAG column_name type_name */ + { 269, -4 }, /* (119) alter_table_clause ::= full_table_name DROP TAG column_name */ + { 269, -5 }, /* (120) alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */ + { 269, -5 }, /* (121) alter_table_clause ::= full_table_name RENAME TAG column_name column_name */ + { 269, -6 }, /* (122) alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */ + { 266, -1 }, /* (123) multi_create_clause ::= create_subtable_clause */ + { 266, -2 }, /* (124) multi_create_clause ::= multi_create_clause create_subtable_clause */ + { 274, -10 }, /* (125) create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_tags_opt TAGS NK_LP literal_list NK_RP table_options */ + { 268, -1 }, /* (126) multi_drop_clause ::= drop_table_clause */ + { 268, -2 }, /* (127) multi_drop_clause ::= multi_drop_clause drop_table_clause */ + { 277, -2 }, /* (128) drop_table_clause ::= exists_opt full_table_name */ + { 275, 0 }, /* (129) specific_tags_opt ::= */ + { 275, -3 }, /* (130) specific_tags_opt ::= NK_LP col_name_list NK_RP */ + { 262, -1 }, /* (131) full_table_name ::= table_name */ + { 262, -3 }, /* (132) full_table_name ::= db_name NK_DOT table_name */ + { 263, -1 }, /* (133) column_def_list ::= column_def */ + { 263, -3 }, /* (134) column_def_list ::= column_def_list NK_COMMA column_def */ + { 280, -2 }, /* (135) column_def ::= column_name type_name */ + { 280, -4 }, /* (136) column_def ::= column_name type_name COMMENT NK_STRING */ + { 272, -1 }, /* (137) type_name ::= BOOL */ + { 272, -1 }, /* (138) type_name ::= TINYINT */ + { 272, -1 }, /* (139) type_name ::= SMALLINT */ + { 272, -1 }, /* (140) type_name ::= INT */ + { 272, -1 }, /* (141) type_name ::= INTEGER */ + { 272, -1 }, /* (142) type_name ::= BIGINT */ + { 272, -1 }, /* (143) type_name ::= FLOAT */ + { 272, -1 }, /* (144) type_name ::= DOUBLE */ + { 272, -4 }, /* (145) type_name ::= BINARY NK_LP NK_INTEGER NK_RP */ + { 272, -1 }, /* (146) type_name ::= TIMESTAMP */ + { 272, -4 }, /* (147) type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */ + { 272, -2 }, /* (148) type_name ::= TINYINT UNSIGNED */ + { 272, -2 }, /* (149) type_name ::= SMALLINT UNSIGNED */ + { 272, -2 }, /* (150) type_name ::= INT UNSIGNED */ + { 272, -2 }, /* (151) type_name ::= BIGINT UNSIGNED */ + { 272, -1 }, /* (152) type_name ::= JSON */ + { 272, -4 }, /* (153) type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */ + { 272, -1 }, /* (154) type_name ::= MEDIUMBLOB */ + { 272, -1 }, /* (155) type_name ::= BLOB */ + { 272, -4 }, /* (156) type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */ + { 272, -1 }, /* (157) type_name ::= DECIMAL */ + { 272, -4 }, /* (158) type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */ + { 272, -6 }, /* (159) type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */ + { 264, 0 }, /* (160) tags_def_opt ::= */ + { 264, -1 }, /* (161) tags_def_opt ::= tags_def */ + { 267, -4 }, /* (162) tags_def ::= TAGS NK_LP column_def_list NK_RP */ + { 265, 0 }, /* (163) table_options ::= */ + { 265, -3 }, /* 
(164) table_options ::= table_options COMMENT NK_STRING */ + { 265, -3 }, /* (165) table_options ::= table_options DELAY NK_INTEGER */ + { 265, -3 }, /* (166) table_options ::= table_options FILE_FACTOR NK_FLOAT */ + { 265, -5 }, /* (167) table_options ::= table_options ROLLUP NK_LP func_name_list NK_RP */ + { 265, -3 }, /* (168) table_options ::= table_options TTL NK_INTEGER */ + { 265, -5 }, /* (169) table_options ::= table_options SMA NK_LP col_name_list NK_RP */ + { 270, -1 }, /* (170) alter_table_options ::= alter_table_option */ + { 270, -2 }, /* (171) alter_table_options ::= alter_table_options alter_table_option */ + { 282, -2 }, /* (172) alter_table_option ::= COMMENT NK_STRING */ + { 282, -2 }, /* (173) alter_table_option ::= TTL NK_INTEGER */ + { 278, -1 }, /* (174) col_name_list ::= col_name */ + { 278, -3 }, /* (175) col_name_list ::= col_name_list NK_COMMA col_name */ + { 283, -1 }, /* (176) col_name ::= column_name */ + { 240, -2 }, /* (177) cmd ::= SHOW DNODES */ + { 240, -2 }, /* (178) cmd ::= SHOW USERS */ + { 240, -2 }, /* (179) cmd ::= SHOW DATABASES */ + { 240, -4 }, /* (180) cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */ + { 240, -4 }, /* (181) cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */ + { 240, -3 }, /* (182) cmd ::= SHOW db_name_cond_opt VGROUPS */ + { 240, -2 }, /* (183) cmd ::= SHOW MNODES */ + { 240, -2 }, /* (184) cmd ::= SHOW MODULES */ + { 240, -2 }, /* (185) cmd ::= SHOW QNODES */ + { 240, -2 }, /* (186) cmd ::= SHOW FUNCTIONS */ + { 240, -5 }, /* (187) cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */ + { 240, -2 }, /* (188) cmd ::= SHOW STREAMS */ + { 240, -2 }, /* (189) cmd ::= SHOW ACCOUNTS */ + { 240, -2 }, /* (190) cmd ::= SHOW APPS */ + { 240, -2 }, /* (191) cmd ::= SHOW CONNECTIONS */ + { 240, -2 }, /* (192) cmd ::= SHOW LICENCE */ + { 240, -2 }, /* (193) cmd ::= SHOW GRANTS */ + { 240, -4 }, /* (194) cmd ::= SHOW CREATE DATABASE db_name */ + { 240, -4 }, /* (195) cmd ::= SHOW CREATE TABLE full_table_name */ + { 240, -4 }, /* (196) cmd ::= SHOW CREATE STABLE full_table_name */ + { 240, -2 }, /* (197) cmd ::= SHOW QUERIES */ + { 240, -2 }, /* (198) cmd ::= SHOW SCORES */ + { 240, -2 }, /* (199) cmd ::= SHOW TOPICS */ + { 240, -2 }, /* (200) cmd ::= SHOW VARIABLES */ + { 240, -2 }, /* (201) cmd ::= SHOW BNODES */ + { 240, -2 }, /* (202) cmd ::= SHOW SNODES */ + { 240, -2 }, /* (203) cmd ::= SHOW CLUSTER */ + { 240, -2 }, /* (204) cmd ::= SHOW TRANSACTIONS */ + { 284, 0 }, /* (205) db_name_cond_opt ::= */ + { 284, -2 }, /* (206) db_name_cond_opt ::= db_name NK_DOT */ + { 285, 0 }, /* (207) like_pattern_opt ::= */ + { 285, -2 }, /* (208) like_pattern_opt ::= LIKE NK_STRING */ + { 286, -1 }, /* (209) table_name_cond ::= table_name */ + { 287, 0 }, /* (210) from_db_opt ::= */ + { 287, -2 }, /* (211) from_db_opt ::= FROM db_name */ + { 281, -1 }, /* (212) func_name_list ::= func_name */ + { 281, -3 }, /* (213) func_name_list ::= func_name_list NK_COMMA func_name */ + { 288, -1 }, /* (214) func_name ::= function_name */ + { 240, -8 }, /* (215) cmd ::= CREATE SMA INDEX not_exists_opt index_name ON table_name index_options */ + { 240, -10 }, /* (216) cmd ::= CREATE FULLTEXT INDEX not_exists_opt index_name ON table_name NK_LP col_name_list NK_RP */ + { 240, -6 }, /* (217) cmd ::= DROP INDEX exists_opt index_name ON table_name */ + { 291, 0 }, /* (218) index_options ::= */ + { 291, -9 }, /* (219) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt */ + { 291, -11 }, /* (220) 
index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt */ + { 292, -1 }, /* (221) func_list ::= func */ + { 292, -3 }, /* (222) func_list ::= func_list NK_COMMA func */ + { 295, -4 }, /* (223) func ::= function_name NK_LP expression_list NK_RP */ + { 240, -7 }, /* (224) cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS query_expression */ + { 240, -7 }, /* (225) cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS db_name */ + { 240, -4 }, /* (226) cmd ::= DROP TOPIC exists_opt topic_name */ + { 240, -6 }, /* (227) cmd ::= DROP CGROUP exists_opt cgroup_name ON topic_name */ + { 298, 0 }, /* (228) topic_options ::= */ + { 298, -3 }, /* (229) topic_options ::= topic_options WITH TABLE */ + { 298, -3 }, /* (230) topic_options ::= topic_options WITH SCHEMA */ + { 298, -3 }, /* (231) topic_options ::= topic_options WITH TAG */ + { 240, -2 }, /* (232) cmd ::= DESC full_table_name */ + { 240, -2 }, /* (233) cmd ::= DESCRIBE full_table_name */ + { 240, -3 }, /* (234) cmd ::= RESET QUERY CACHE */ + { 240, -4 }, /* (235) cmd ::= EXPLAIN analyze_opt explain_options query_expression */ + { 301, 0 }, /* (236) analyze_opt ::= */ + { 301, -1 }, /* (237) analyze_opt ::= ANALYZE */ + { 302, 0 }, /* (238) explain_options ::= */ + { 302, -3 }, /* (239) explain_options ::= explain_options VERBOSE NK_BOOL */ + { 302, -3 }, /* (240) explain_options ::= explain_options RATIO NK_FLOAT */ + { 240, -6 }, /* (241) cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP */ + { 240, -10 }, /* (242) cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */ + { 240, -4 }, /* (243) cmd ::= DROP FUNCTION exists_opt function_name */ + { 303, 0 }, /* (244) agg_func_opt ::= */ + { 303, -1 }, /* (245) agg_func_opt ::= AGGREGATE */ + { 304, 0 }, /* (246) bufsize_opt ::= */ + { 304, -2 }, /* (247) bufsize_opt ::= BUFSIZE NK_INTEGER */ + { 240, -8 }, /* (248) cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression */ + { 240, -4 }, /* (249) cmd ::= DROP STREAM exists_opt stream_name */ + { 307, 0 }, /* (250) into_opt ::= */ + { 307, -2 }, /* (251) into_opt ::= INTO full_table_name */ + { 306, 0 }, /* (252) stream_options ::= */ + { 306, -3 }, /* (253) stream_options ::= stream_options TRIGGER AT_ONCE */ + { 306, -3 }, /* (254) stream_options ::= stream_options TRIGGER WINDOW_CLOSE */ + { 306, -3 }, /* (255) stream_options ::= stream_options WATERMARK duration_literal */ + { 240, -3 }, /* (256) cmd ::= KILL CONNECTION NK_INTEGER */ + { 240, -3 }, /* (257) cmd ::= KILL QUERY NK_INTEGER */ + { 240, -3 }, /* (258) cmd ::= KILL TRANSACTION NK_INTEGER */ + { 240, -4 }, /* (259) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ + { 240, -4 }, /* (260) cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ + { 240, -3 }, /* (261) cmd ::= SPLIT VGROUP NK_INTEGER */ + { 308, -2 }, /* (262) dnode_list ::= DNODE NK_INTEGER */ + { 308, -3 }, /* (263) dnode_list ::= dnode_list DNODE NK_INTEGER */ + { 240, -3 }, /* (264) cmd ::= SYNCDB db_name REPLICA */ + { 240, -1 }, /* (265) cmd ::= query_expression */ + { 243, -1 }, /* (266) literal ::= NK_INTEGER */ + { 243, -1 }, /* (267) literal ::= NK_FLOAT */ + { 243, -1 }, /* (268) literal ::= NK_STRING */ + { 243, -1 }, /* (269) literal ::= NK_BOOL */ + { 243, -2 }, /* (270) literal ::= TIMESTAMP NK_STRING */ + { 243, -1 }, /* (271) literal ::= duration_literal */ + { 243, -1 }, /* (272) literal ::= NULL */ + { 243, -1 }, /* 
(273) literal ::= NK_QUESTION */ + { 293, -1 }, /* (274) duration_literal ::= NK_VARIABLE */ + { 309, -1 }, /* (275) signed ::= NK_INTEGER */ + { 309, -2 }, /* (276) signed ::= NK_PLUS NK_INTEGER */ + { 309, -2 }, /* (277) signed ::= NK_MINUS NK_INTEGER */ + { 309, -1 }, /* (278) signed ::= NK_FLOAT */ + { 309, -2 }, /* (279) signed ::= NK_PLUS NK_FLOAT */ + { 309, -2 }, /* (280) signed ::= NK_MINUS NK_FLOAT */ + { 273, -1 }, /* (281) signed_literal ::= signed */ + { 273, -1 }, /* (282) signed_literal ::= NK_STRING */ + { 273, -1 }, /* (283) signed_literal ::= NK_BOOL */ + { 273, -2 }, /* (284) signed_literal ::= TIMESTAMP NK_STRING */ + { 273, -1 }, /* (285) signed_literal ::= duration_literal */ + { 273, -1 }, /* (286) signed_literal ::= NULL */ + { 273, -1 }, /* (287) signed_literal ::= literal_func */ + { 276, -1 }, /* (288) literal_list ::= signed_literal */ + { 276, -3 }, /* (289) literal_list ::= literal_list NK_COMMA signed_literal */ + { 250, -1 }, /* (290) db_name ::= NK_ID */ + { 279, -1 }, /* (291) table_name ::= NK_ID */ + { 271, -1 }, /* (292) column_name ::= NK_ID */ + { 289, -1 }, /* (293) function_name ::= NK_ID */ + { 311, -1 }, /* (294) table_alias ::= NK_ID */ + { 312, -1 }, /* (295) column_alias ::= NK_ID */ + { 245, -1 }, /* (296) user_name ::= NK_ID */ + { 290, -1 }, /* (297) index_name ::= NK_ID */ + { 297, -1 }, /* (298) topic_name ::= NK_ID */ + { 305, -1 }, /* (299) stream_name ::= NK_ID */ + { 300, -1 }, /* (300) cgroup_name ::= NK_ID */ + { 313, -1 }, /* (301) expression ::= literal */ + { 313, -1 }, /* (302) expression ::= pseudo_column */ + { 313, -1 }, /* (303) expression ::= column_reference */ + { 313, -1 }, /* (304) expression ::= function_expression */ + { 313, -1 }, /* (305) expression ::= subquery */ + { 313, -3 }, /* (306) expression ::= NK_LP expression NK_RP */ + { 313, -2 }, /* (307) expression ::= NK_PLUS expression */ + { 313, -2 }, /* (308) expression ::= NK_MINUS expression */ + { 313, -3 }, /* (309) expression ::= expression NK_PLUS expression */ + { 313, -3 }, /* (310) expression ::= expression NK_MINUS expression */ + { 313, -3 }, /* (311) expression ::= expression NK_STAR expression */ + { 313, -3 }, /* (312) expression ::= expression NK_SLASH expression */ + { 313, -3 }, /* (313) expression ::= expression NK_REM expression */ + { 313, -3 }, /* (314) expression ::= column_reference NK_ARROW NK_STRING */ + { 296, -1 }, /* (315) expression_list ::= expression */ + { 296, -3 }, /* (316) expression_list ::= expression_list NK_COMMA expression */ + { 315, -1 }, /* (317) column_reference ::= column_name */ + { 315, -3 }, /* (318) column_reference ::= table_name NK_DOT column_name */ + { 314, -1 }, /* (319) pseudo_column ::= ROWTS */ + { 314, -1 }, /* (320) pseudo_column ::= TBNAME */ + { 314, -3 }, /* (321) pseudo_column ::= table_name NK_DOT TBNAME */ + { 314, -1 }, /* (322) pseudo_column ::= QSTARTTS */ + { 314, -1 }, /* (323) pseudo_column ::= QENDTS */ + { 314, -1 }, /* (324) pseudo_column ::= WSTARTTS */ + { 314, -1 }, /* (325) pseudo_column ::= WENDTS */ + { 314, -1 }, /* (326) pseudo_column ::= WDURATION */ + { 316, -4 }, /* (327) function_expression ::= function_name NK_LP expression_list NK_RP */ + { 316, -4 }, /* (328) function_expression ::= star_func NK_LP star_func_para_list NK_RP */ + { 316, -6 }, /* (329) function_expression ::= CAST NK_LP expression AS type_name NK_RP */ + { 316, -1 }, /* (330) function_expression ::= literal_func */ + { 310, -3 }, /* (331) literal_func ::= noarg_func NK_LP NK_RP */ + { 310, -1 }, /* (332) 
literal_func ::= NOW */ + { 320, -1 }, /* (333) noarg_func ::= NOW */ + { 320, -1 }, /* (334) noarg_func ::= TODAY */ + { 320, -1 }, /* (335) noarg_func ::= TIMEZONE */ + { 318, -1 }, /* (336) star_func ::= COUNT */ + { 318, -1 }, /* (337) star_func ::= FIRST */ + { 318, -1 }, /* (338) star_func ::= LAST */ + { 318, -1 }, /* (339) star_func ::= LAST_ROW */ + { 319, -1 }, /* (340) star_func_para_list ::= NK_STAR */ + { 319, -1 }, /* (341) star_func_para_list ::= other_para_list */ + { 321, -1 }, /* (342) other_para_list ::= star_func_para */ + { 321, -3 }, /* (343) other_para_list ::= other_para_list NK_COMMA star_func_para */ + { 322, -1 }, /* (344) star_func_para ::= expression */ + { 322, -3 }, /* (345) star_func_para ::= table_name NK_DOT NK_STAR */ + { 323, -3 }, /* (346) predicate ::= expression compare_op expression */ + { 323, -5 }, /* (347) predicate ::= expression BETWEEN expression AND expression */ + { 323, -6 }, /* (348) predicate ::= expression NOT BETWEEN expression AND expression */ + { 323, -3 }, /* (349) predicate ::= expression IS NULL */ + { 323, -4 }, /* (350) predicate ::= expression IS NOT NULL */ + { 323, -3 }, /* (351) predicate ::= expression in_op in_predicate_value */ + { 324, -1 }, /* (352) compare_op ::= NK_LT */ + { 324, -1 }, /* (353) compare_op ::= NK_GT */ + { 324, -1 }, /* (354) compare_op ::= NK_LE */ + { 324, -1 }, /* (355) compare_op ::= NK_GE */ + { 324, -1 }, /* (356) compare_op ::= NK_NE */ + { 324, -1 }, /* (357) compare_op ::= NK_EQ */ + { 324, -1 }, /* (358) compare_op ::= LIKE */ + { 324, -2 }, /* (359) compare_op ::= NOT LIKE */ + { 324, -1 }, /* (360) compare_op ::= MATCH */ + { 324, -1 }, /* (361) compare_op ::= NMATCH */ + { 324, -1 }, /* (362) compare_op ::= CONTAINS */ + { 325, -1 }, /* (363) in_op ::= IN */ + { 325, -2 }, /* (364) in_op ::= NOT IN */ + { 326, -3 }, /* (365) in_predicate_value ::= NK_LP expression_list NK_RP */ + { 327, -1 }, /* (366) boolean_value_expression ::= boolean_primary */ + { 327, -2 }, /* (367) boolean_value_expression ::= NOT boolean_primary */ + { 327, -3 }, /* (368) boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ + { 327, -3 }, /* (369) boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ + { 328, -1 }, /* (370) boolean_primary ::= predicate */ + { 328, -3 }, /* (371) boolean_primary ::= NK_LP boolean_value_expression NK_RP */ + { 329, -1 }, /* (372) common_expression ::= expression */ + { 329, -1 }, /* (373) common_expression ::= boolean_value_expression */ + { 330, -2 }, /* (374) from_clause ::= FROM table_reference_list */ + { 331, -1 }, /* (375) table_reference_list ::= table_reference */ + { 331, -3 }, /* (376) table_reference_list ::= table_reference_list NK_COMMA table_reference */ + { 332, -1 }, /* (377) table_reference ::= table_primary */ + { 332, -1 }, /* (378) table_reference ::= joined_table */ + { 333, -2 }, /* (379) table_primary ::= table_name alias_opt */ + { 333, -4 }, /* (380) table_primary ::= db_name NK_DOT table_name alias_opt */ + { 333, -2 }, /* (381) table_primary ::= subquery alias_opt */ + { 333, -1 }, /* (382) table_primary ::= parenthesized_joined_table */ + { 335, 0 }, /* (383) alias_opt ::= */ + { 335, -1 }, /* (384) alias_opt ::= table_alias */ + { 335, -2 }, /* (385) alias_opt ::= AS table_alias */ + { 336, -3 }, /* (386) parenthesized_joined_table ::= NK_LP joined_table NK_RP */ + { 336, -3 }, /* (387) parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ + { 334, -6 }, /* (388) 
joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ + { 337, 0 }, /* (389) join_type ::= */ + { 337, -1 }, /* (390) join_type ::= INNER */ + { 339, -9 }, /* (391) query_specification ::= SELECT set_quantifier_opt select_list from_clause where_clause_opt partition_by_clause_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ + { 340, 0 }, /* (392) set_quantifier_opt ::= */ + { 340, -1 }, /* (393) set_quantifier_opt ::= DISTINCT */ + { 340, -1 }, /* (394) set_quantifier_opt ::= ALL */ + { 341, -1 }, /* (395) select_list ::= NK_STAR */ + { 341, -1 }, /* (396) select_list ::= select_sublist */ + { 347, -1 }, /* (397) select_sublist ::= select_item */ + { 347, -3 }, /* (398) select_sublist ::= select_sublist NK_COMMA select_item */ + { 348, -1 }, /* (399) select_item ::= common_expression */ + { 348, -2 }, /* (400) select_item ::= common_expression column_alias */ + { 348, -3 }, /* (401) select_item ::= common_expression AS column_alias */ + { 348, -3 }, /* (402) select_item ::= table_name NK_DOT NK_STAR */ + { 342, 0 }, /* (403) where_clause_opt ::= */ + { 342, -2 }, /* (404) where_clause_opt ::= WHERE search_condition */ + { 343, 0 }, /* (405) partition_by_clause_opt ::= */ + { 343, -3 }, /* (406) partition_by_clause_opt ::= PARTITION BY expression_list */ + { 344, 0 }, /* (407) twindow_clause_opt ::= */ + { 344, -6 }, /* (408) twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ + { 344, -4 }, /* (409) twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */ + { 344, -6 }, /* (410) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ + { 344, -8 }, /* (411) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ + { 294, 0 }, /* (412) sliding_opt ::= */ + { 294, -4 }, /* (413) sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ + { 349, 0 }, /* (414) fill_opt ::= */ + { 349, -4 }, /* (415) fill_opt ::= FILL NK_LP fill_mode NK_RP */ + { 349, -6 }, /* (416) fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ + { 350, -1 }, /* (417) fill_mode ::= NONE */ + { 350, -1 }, /* (418) fill_mode ::= PREV */ + { 350, -1 }, /* (419) fill_mode ::= NULL */ + { 350, -1 }, /* (420) fill_mode ::= LINEAR */ + { 350, -1 }, /* (421) fill_mode ::= NEXT */ + { 345, 0 }, /* (422) group_by_clause_opt ::= */ + { 345, -3 }, /* (423) group_by_clause_opt ::= GROUP BY group_by_list */ + { 351, -1 }, /* (424) group_by_list ::= expression */ + { 351, -3 }, /* (425) group_by_list ::= group_by_list NK_COMMA expression */ + { 346, 0 }, /* (426) having_clause_opt ::= */ + { 346, -2 }, /* (427) having_clause_opt ::= HAVING search_condition */ + { 299, -4 }, /* (428) query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */ + { 352, -1 }, /* (429) query_expression_body ::= query_primary */ + { 352, -4 }, /* (430) query_expression_body ::= query_expression_body UNION ALL query_expression_body */ + { 352, -3 }, /* (431) query_expression_body ::= query_expression_body UNION query_expression_body */ + { 356, -1 }, /* (432) query_primary ::= query_specification */ + { 356, -6 }, /* (433) query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */ + { 353, 0 }, /* (434) order_by_clause_opt ::= */ + { 353, -3 }, /* (435) order_by_clause_opt ::= ORDER BY sort_specification_list */ + { 354, 0 }, /* (436) slimit_clause_opt ::= */ + { 354, -2 }, /* (437) 
slimit_clause_opt ::= SLIMIT NK_INTEGER */ + { 354, -4 }, /* (438) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ + { 354, -4 }, /* (439) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ + { 355, 0 }, /* (440) limit_clause_opt ::= */ + { 355, -2 }, /* (441) limit_clause_opt ::= LIMIT NK_INTEGER */ + { 355, -4 }, /* (442) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ + { 355, -4 }, /* (443) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ + { 317, -3 }, /* (444) subquery ::= NK_LP query_expression NK_RP */ + { 338, -1 }, /* (445) search_condition ::= common_expression */ + { 357, -1 }, /* (446) sort_specification_list ::= sort_specification */ + { 357, -3 }, /* (447) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ + { 358, -3 }, /* (448) sort_specification ::= expression ordering_specification_opt null_ordering_opt */ + { 359, 0 }, /* (449) ordering_specification_opt ::= */ + { 359, -1 }, /* (450) ordering_specification_opt ::= ASC */ + { 359, -1 }, /* (451) ordering_specification_opt ::= DESC */ + { 360, 0 }, /* (452) null_ordering_opt ::= */ + { 360, -2 }, /* (453) null_ordering_opt ::= NULLS FIRST */ + { 360, -2 }, /* (454) null_ordering_opt ::= NULLS LAST */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -3099,11 +3114,11 @@ static YYACTIONTYPE yy_reduce( YYMINORTYPE yylhsminor; case 0: /* cmd ::= CREATE ACCOUNT NK_ID PASS NK_STRING account_options */ { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); } - yy_destructor(yypParser,239,&yymsp[0].minor); + yy_destructor(yypParser,241,&yymsp[0].minor); break; case 1: /* cmd ::= ALTER ACCOUNT NK_ID alter_account_options */ { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); } - yy_destructor(yypParser,240,&yymsp[0].minor); + yy_destructor(yypParser,242,&yymsp[0].minor); break; case 2: /* account_options ::= */ { } @@ -3117,20 +3132,20 @@ static YYACTIONTYPE yy_reduce( case 9: /* account_options ::= account_options USERS literal */ yytestcase(yyruleno==9); case 10: /* account_options ::= account_options CONNS literal */ yytestcase(yyruleno==10); case 11: /* account_options ::= account_options STATE literal */ yytestcase(yyruleno==11); -{ yy_destructor(yypParser,239,&yymsp[-2].minor); +{ yy_destructor(yypParser,241,&yymsp[-2].minor); { } - yy_destructor(yypParser,241,&yymsp[0].minor); + yy_destructor(yypParser,243,&yymsp[0].minor); } break; case 12: /* alter_account_options ::= alter_account_option */ -{ yy_destructor(yypParser,242,&yymsp[0].minor); +{ yy_destructor(yypParser,244,&yymsp[0].minor); { } } break; case 13: /* alter_account_options ::= alter_account_options alter_account_option */ -{ yy_destructor(yypParser,240,&yymsp[-1].minor); +{ yy_destructor(yypParser,242,&yymsp[-1].minor); { } - yy_destructor(yypParser,242,&yymsp[0].minor); + yy_destructor(yypParser,244,&yymsp[0].minor); } break; case 14: /* alter_account_option ::= PASS literal */ @@ -3144,63 +3159,63 @@ static YYACTIONTYPE yy_reduce( case 22: /* alter_account_option ::= CONNS literal */ yytestcase(yyruleno==22); case 23: /* alter_account_option ::= STATE literal */ yytestcase(yyruleno==23); { } - yy_destructor(yypParser,241,&yymsp[0].minor); + yy_destructor(yypParser,243,&yymsp[0].minor); break; case 24: /* cmd ::= CREATE USER user_name PASS NK_STRING */ -{ pCxt->pRootNode = createCreateUserStmt(pCxt, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy0); } +{ pCxt->pRootNode = createCreateUserStmt(pCxt, 
&yymsp[-2].minor.yy53, &yymsp[0].minor.yy0); } break; case 25: /* cmd ::= ALTER USER user_name PASS NK_STRING */ -{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy105, TSDB_ALTER_USER_PASSWD, &yymsp[0].minor.yy0); } +{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy53, TSDB_ALTER_USER_PASSWD, &yymsp[0].minor.yy0); } break; case 26: /* cmd ::= ALTER USER user_name PRIVILEGE NK_STRING */ -{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy105, TSDB_ALTER_USER_PRIVILEGES, &yymsp[0].minor.yy0); } +{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy53, TSDB_ALTER_USER_PRIVILEGES, &yymsp[0].minor.yy0); } break; case 27: /* cmd ::= DROP USER user_name */ -{ pCxt->pRootNode = createDropUserStmt(pCxt, &yymsp[0].minor.yy105); } +{ pCxt->pRootNode = createDropUserStmt(pCxt, &yymsp[0].minor.yy53); } break; case 28: /* cmd ::= GRANT privileges ON priv_level TO user_name */ -{ pCxt->pRootNode = createGrantStmt(pCxt, yymsp[-4].minor.yy593, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy105); } +{ pCxt->pRootNode = createGrantStmt(pCxt, yymsp[-4].minor.yy435, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy53); } break; case 29: /* cmd ::= REVOKE privileges ON priv_level FROM user_name */ -{ pCxt->pRootNode = createRevokeStmt(pCxt, yymsp[-4].minor.yy593, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy105); } +{ pCxt->pRootNode = createRevokeStmt(pCxt, yymsp[-4].minor.yy435, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy53); } break; case 30: /* privileges ::= ALL */ -{ yymsp[0].minor.yy593 = PRIVILEGE_TYPE_ALL; } +{ yymsp[0].minor.yy435 = PRIVILEGE_TYPE_ALL; } break; case 31: /* privileges ::= priv_type_list */ case 32: /* priv_type_list ::= priv_type */ yytestcase(yyruleno==32); -{ yylhsminor.yy593 = yymsp[0].minor.yy593; } - yymsp[0].minor.yy593 = yylhsminor.yy593; +{ yylhsminor.yy435 = yymsp[0].minor.yy435; } + yymsp[0].minor.yy435 = yylhsminor.yy435; break; case 33: /* priv_type_list ::= priv_type_list NK_COMMA priv_type */ -{ yylhsminor.yy593 = yymsp[-2].minor.yy593 | yymsp[0].minor.yy593; } - yymsp[-2].minor.yy593 = yylhsminor.yy593; +{ yylhsminor.yy435 = yymsp[-2].minor.yy435 | yymsp[0].minor.yy435; } + yymsp[-2].minor.yy435 = yylhsminor.yy435; break; case 34: /* priv_type ::= READ */ -{ yymsp[0].minor.yy593 = PRIVILEGE_TYPE_READ; } +{ yymsp[0].minor.yy435 = PRIVILEGE_TYPE_READ; } break; case 35: /* priv_type ::= WRITE */ -{ yymsp[0].minor.yy593 = PRIVILEGE_TYPE_WRITE; } +{ yymsp[0].minor.yy435 = PRIVILEGE_TYPE_WRITE; } break; case 36: /* priv_level ::= NK_STAR NK_DOT NK_STAR */ -{ yylhsminor.yy105 = yymsp[-2].minor.yy0; } - yymsp[-2].minor.yy105 = yylhsminor.yy105; +{ yylhsminor.yy53 = yymsp[-2].minor.yy0; } + yymsp[-2].minor.yy53 = yylhsminor.yy53; break; case 37: /* priv_level ::= db_name NK_DOT NK_STAR */ -{ yylhsminor.yy105 = yymsp[-2].minor.yy105; } - yymsp[-2].minor.yy105 = yylhsminor.yy105; +{ yylhsminor.yy53 = yymsp[-2].minor.yy53; } + yymsp[-2].minor.yy53 = yylhsminor.yy53; break; case 38: /* cmd ::= CREATE DNODE dnode_endpoint */ -{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[0].minor.yy105, NULL); } +{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[0].minor.yy53, NULL); } break; case 39: /* cmd ::= CREATE DNODE dnode_host_name PORT NK_INTEGER */ -{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy0); } +{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy0); } break; case 40: /* cmd ::= DROP DNODE NK_INTEGER */ { pCxt->pRootNode = createDropDnodeStmt(pCxt, 
&yymsp[0].minor.yy0); } break; case 41: /* cmd ::= DROP DNODE dnode_endpoint */ -{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy105); } +{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy53); } break; case 42: /* cmd ::= ALTER DNODE NK_INTEGER NK_STRING */ { pCxt->pRootNode = createAlterDnodeStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, NULL); } @@ -3217,25 +3232,26 @@ static YYACTIONTYPE yy_reduce( case 46: /* dnode_endpoint ::= NK_STRING */ case 47: /* dnode_host_name ::= NK_ID */ yytestcase(yyruleno==47); case 48: /* dnode_host_name ::= NK_IPTOKEN */ yytestcase(yyruleno==48); - case 288: /* db_name ::= NK_ID */ yytestcase(yyruleno==288); - case 289: /* table_name ::= NK_ID */ yytestcase(yyruleno==289); - case 290: /* column_name ::= NK_ID */ yytestcase(yyruleno==290); - case 291: /* function_name ::= NK_ID */ yytestcase(yyruleno==291); - case 292: /* table_alias ::= NK_ID */ yytestcase(yyruleno==292); - case 293: /* column_alias ::= NK_ID */ yytestcase(yyruleno==293); - case 294: /* user_name ::= NK_ID */ yytestcase(yyruleno==294); - case 295: /* index_name ::= NK_ID */ yytestcase(yyruleno==295); - case 296: /* topic_name ::= NK_ID */ yytestcase(yyruleno==296); - case 297: /* stream_name ::= NK_ID */ yytestcase(yyruleno==297); - case 330: /* noarg_func ::= NOW */ yytestcase(yyruleno==330); - case 331: /* noarg_func ::= TODAY */ yytestcase(yyruleno==331); - case 332: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==332); - case 333: /* star_func ::= COUNT */ yytestcase(yyruleno==333); - case 334: /* star_func ::= FIRST */ yytestcase(yyruleno==334); - case 335: /* star_func ::= LAST */ yytestcase(yyruleno==335); - case 336: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==336); -{ yylhsminor.yy105 = yymsp[0].minor.yy0; } - yymsp[0].minor.yy105 = yylhsminor.yy105; + case 290: /* db_name ::= NK_ID */ yytestcase(yyruleno==290); + case 291: /* table_name ::= NK_ID */ yytestcase(yyruleno==291); + case 292: /* column_name ::= NK_ID */ yytestcase(yyruleno==292); + case 293: /* function_name ::= NK_ID */ yytestcase(yyruleno==293); + case 294: /* table_alias ::= NK_ID */ yytestcase(yyruleno==294); + case 295: /* column_alias ::= NK_ID */ yytestcase(yyruleno==295); + case 296: /* user_name ::= NK_ID */ yytestcase(yyruleno==296); + case 297: /* index_name ::= NK_ID */ yytestcase(yyruleno==297); + case 298: /* topic_name ::= NK_ID */ yytestcase(yyruleno==298); + case 299: /* stream_name ::= NK_ID */ yytestcase(yyruleno==299); + case 300: /* cgroup_name ::= NK_ID */ yytestcase(yyruleno==300); + case 333: /* noarg_func ::= NOW */ yytestcase(yyruleno==333); + case 334: /* noarg_func ::= TODAY */ yytestcase(yyruleno==334); + case 335: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==335); + case 336: /* star_func ::= COUNT */ yytestcase(yyruleno==336); + case 337: /* star_func ::= FIRST */ yytestcase(yyruleno==337); + case 338: /* star_func ::= LAST */ yytestcase(yyruleno==338); + case 339: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==339); +{ yylhsminor.yy53 = yymsp[0].minor.yy0; } + yymsp[0].minor.yy53 = yylhsminor.yy53; break; case 49: /* cmd ::= ALTER LOCAL NK_STRING */ { pCxt->pRootNode = createAlterLocalStmt(pCxt, &yymsp[0].minor.yy0, NULL); } @@ -3268,1154 +3284,1161 @@ static YYACTIONTYPE yy_reduce( { pCxt->pRootNode = createDropComponentNodeStmt(pCxt, QUERY_NODE_DROP_MNODE_STMT, &yymsp[0].minor.yy0); } break; case 59: /* cmd ::= CREATE DATABASE not_exists_opt db_name db_options */ -{ pCxt->pRootNode = createCreateDatabaseStmt(pCxt, 
yymsp[-2].minor.yy617, &yymsp[-1].minor.yy105, yymsp[0].minor.yy172); } +{ pCxt->pRootNode = createCreateDatabaseStmt(pCxt, yymsp[-2].minor.yy603, &yymsp[-1].minor.yy53, yymsp[0].minor.yy636); } break; case 60: /* cmd ::= DROP DATABASE exists_opt db_name */ -{ pCxt->pRootNode = createDropDatabaseStmt(pCxt, yymsp[-1].minor.yy617, &yymsp[0].minor.yy105); } +{ pCxt->pRootNode = createDropDatabaseStmt(pCxt, yymsp[-1].minor.yy603, &yymsp[0].minor.yy53); } break; case 61: /* cmd ::= USE db_name */ -{ pCxt->pRootNode = createUseDatabaseStmt(pCxt, &yymsp[0].minor.yy105); } +{ pCxt->pRootNode = createUseDatabaseStmt(pCxt, &yymsp[0].minor.yy53); } break; case 62: /* cmd ::= ALTER DATABASE db_name alter_db_options */ -{ pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &yymsp[-1].minor.yy105, yymsp[0].minor.yy172); } +{ pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &yymsp[-1].minor.yy53, yymsp[0].minor.yy636); } break; case 63: /* not_exists_opt ::= IF NOT EXISTS */ -{ yymsp[-2].minor.yy617 = true; } +{ yymsp[-2].minor.yy603 = true; } break; case 64: /* not_exists_opt ::= */ case 66: /* exists_opt ::= */ yytestcase(yyruleno==66); - case 234: /* analyze_opt ::= */ yytestcase(yyruleno==234); - case 242: /* agg_func_opt ::= */ yytestcase(yyruleno==242); - case 389: /* set_quantifier_opt ::= */ yytestcase(yyruleno==389); -{ yymsp[1].minor.yy617 = false; } + case 236: /* analyze_opt ::= */ yytestcase(yyruleno==236); + case 244: /* agg_func_opt ::= */ yytestcase(yyruleno==244); + case 392: /* set_quantifier_opt ::= */ yytestcase(yyruleno==392); +{ yymsp[1].minor.yy603 = false; } break; case 65: /* exists_opt ::= IF EXISTS */ -{ yymsp[-1].minor.yy617 = true; } +{ yymsp[-1].minor.yy603 = true; } break; case 67: /* db_options ::= */ -{ yymsp[1].minor.yy172 = createDefaultDatabaseOptions(pCxt); } +{ yymsp[1].minor.yy636 = createDefaultDatabaseOptions(pCxt); } break; case 68: /* db_options ::= db_options BUFFER NK_INTEGER */ -{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_BUFFER, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; +{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_BUFFER, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; case 69: /* db_options ::= db_options CACHELAST NK_INTEGER */ -{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_CACHELAST, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; +{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_CACHELAST, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; case 70: /* db_options ::= db_options COMP NK_INTEGER */ -{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_COMP, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; +{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_COMP, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; case 71: /* db_options ::= db_options DAYS NK_INTEGER */ case 72: /* db_options ::= db_options DAYS NK_VARIABLE */ yytestcase(yyruleno==72); -{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_DAYS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; +{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_DAYS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; case 73: /* db_options ::= db_options FSYNC NK_INTEGER */ -{ yylhsminor.yy172 = 
setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_FSYNC, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; +{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_FSYNC, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; case 74: /* db_options ::= db_options MAXROWS NK_INTEGER */ -{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_MAXROWS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; +{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_MAXROWS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; case 75: /* db_options ::= db_options MINROWS NK_INTEGER */ -{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_MINROWS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; +{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_MINROWS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; case 76: /* db_options ::= db_options KEEP integer_list */ case 77: /* db_options ::= db_options KEEP variable_list */ yytestcase(yyruleno==77); -{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_KEEP, yymsp[0].minor.yy60); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; +{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_KEEP, yymsp[0].minor.yy236); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; case 78: /* db_options ::= db_options PAGES NK_INTEGER */ -{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_PAGES, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; +{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_PAGES, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; case 79: /* db_options ::= db_options PAGESIZE NK_INTEGER */ -{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_PAGESIZE, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; +{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_PAGESIZE, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; case 80: /* db_options ::= db_options PRECISION NK_STRING */ -{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_PRECISION, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; +{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_PRECISION, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; case 81: /* db_options ::= db_options REPLICA NK_INTEGER */ -{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_REPLICA, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; +{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_REPLICA, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; case 82: /* db_options ::= db_options STRICT NK_INTEGER */ -{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_STRICT, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; +{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_STRICT, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; case 83: /* db_options ::= db_options WAL NK_INTEGER */ -{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_WAL, 
&yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; +{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_WAL, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; case 84: /* db_options ::= db_options VGROUPS NK_INTEGER */ -{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_VGROUPS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; +{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_VGROUPS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; case 85: /* db_options ::= db_options SINGLE_STABLE NK_INTEGER */ -{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_SINGLE_STABLE, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; +{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_SINGLE_STABLE, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; case 86: /* db_options ::= db_options RETENTIONS retention_list */ -{ yylhsminor.yy172 = setDatabaseOption(pCxt, yymsp[-2].minor.yy172, DB_OPTION_RETENTIONS, yymsp[0].minor.yy60); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; - break; - case 87: /* alter_db_options ::= alter_db_option */ -{ yylhsminor.yy172 = createAlterDatabaseOptions(pCxt); yylhsminor.yy172 = setAlterDatabaseOption(pCxt, yylhsminor.yy172, &yymsp[0].minor.yy609); } - yymsp[0].minor.yy172 = yylhsminor.yy172; - break; - case 88: /* alter_db_options ::= alter_db_options alter_db_option */ -{ yylhsminor.yy172 = setAlterDatabaseOption(pCxt, yymsp[-1].minor.yy172, &yymsp[0].minor.yy609); } - yymsp[-1].minor.yy172 = yylhsminor.yy172; - break; - case 89: /* alter_db_option ::= BUFFER NK_INTEGER */ -{ yymsp[-1].minor.yy609.type = DB_OPTION_BUFFER; yymsp[-1].minor.yy609.val = yymsp[0].minor.yy0; } - break; - case 90: /* alter_db_option ::= CACHELAST NK_INTEGER */ -{ yymsp[-1].minor.yy609.type = DB_OPTION_CACHELAST; yymsp[-1].minor.yy609.val = yymsp[0].minor.yy0; } - break; - case 91: /* alter_db_option ::= FSYNC NK_INTEGER */ -{ yymsp[-1].minor.yy609.type = DB_OPTION_FSYNC; yymsp[-1].minor.yy609.val = yymsp[0].minor.yy0; } - break; - case 92: /* alter_db_option ::= KEEP integer_list */ - case 93: /* alter_db_option ::= KEEP variable_list */ yytestcase(yyruleno==93); -{ yymsp[-1].minor.yy609.type = DB_OPTION_KEEP; yymsp[-1].minor.yy609.pList = yymsp[0].minor.yy60; } - break; - case 94: /* alter_db_option ::= PAGES NK_INTEGER */ -{ yymsp[-1].minor.yy609.type = DB_OPTION_PAGES; yymsp[-1].minor.yy609.val = yymsp[0].minor.yy0; } - break; - case 95: /* alter_db_option ::= REPLICA NK_INTEGER */ -{ yymsp[-1].minor.yy609.type = DB_OPTION_REPLICA; yymsp[-1].minor.yy609.val = yymsp[0].minor.yy0; } - break; - case 96: /* alter_db_option ::= STRICT NK_INTEGER */ -{ yymsp[-1].minor.yy609.type = DB_OPTION_STRICT; yymsp[-1].minor.yy609.val = yymsp[0].minor.yy0; } - break; - case 97: /* alter_db_option ::= WAL NK_INTEGER */ -{ yymsp[-1].minor.yy609.type = DB_OPTION_WAL; yymsp[-1].minor.yy609.val = yymsp[0].minor.yy0; } - break; - case 98: /* integer_list ::= NK_INTEGER */ -{ yylhsminor.yy60 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy60 = yylhsminor.yy60; - break; - case 99: /* integer_list ::= integer_list NK_COMMA NK_INTEGER */ - case 261: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==261); -{ yylhsminor.yy60 = addNodeToList(pCxt, yymsp[-2].minor.yy60, 
createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } - yymsp[-2].minor.yy60 = yylhsminor.yy60; - break; - case 100: /* variable_list ::= NK_VARIABLE */ -{ yylhsminor.yy60 = createNodeList(pCxt, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy60 = yylhsminor.yy60; - break; - case 101: /* variable_list ::= variable_list NK_COMMA NK_VARIABLE */ -{ yylhsminor.yy60 = addNodeToList(pCxt, yymsp[-2].minor.yy60, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[-2].minor.yy60 = yylhsminor.yy60; - break; - case 102: /* retention_list ::= retention */ - case 122: /* multi_create_clause ::= create_subtable_clause */ yytestcase(yyruleno==122); - case 125: /* multi_drop_clause ::= drop_table_clause */ yytestcase(yyruleno==125); - case 132: /* column_def_list ::= column_def */ yytestcase(yyruleno==132); - case 173: /* col_name_list ::= col_name */ yytestcase(yyruleno==173); - case 211: /* func_name_list ::= func_name */ yytestcase(yyruleno==211); - case 220: /* func_list ::= func */ yytestcase(yyruleno==220); - case 286: /* literal_list ::= signed_literal */ yytestcase(yyruleno==286); - case 339: /* other_para_list ::= star_func_para */ yytestcase(yyruleno==339); - case 394: /* select_sublist ::= select_item */ yytestcase(yyruleno==394); - case 443: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==443); -{ yylhsminor.yy60 = createNodeList(pCxt, yymsp[0].minor.yy172); } - yymsp[0].minor.yy60 = yylhsminor.yy60; - break; - case 103: /* retention_list ::= retention_list NK_COMMA retention */ - case 133: /* column_def_list ::= column_def_list NK_COMMA column_def */ yytestcase(yyruleno==133); - case 174: /* col_name_list ::= col_name_list NK_COMMA col_name */ yytestcase(yyruleno==174); - case 212: /* func_name_list ::= func_name_list NK_COMMA func_name */ yytestcase(yyruleno==212); - case 221: /* func_list ::= func_list NK_COMMA func */ yytestcase(yyruleno==221); - case 287: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==287); - case 340: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==340); - case 395: /* select_sublist ::= select_sublist NK_COMMA select_item */ yytestcase(yyruleno==395); - case 444: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==444); -{ yylhsminor.yy60 = addNodeToList(pCxt, yymsp[-2].minor.yy60, yymsp[0].minor.yy172); } - yymsp[-2].minor.yy60 = yylhsminor.yy60; - break; - case 104: /* retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */ -{ yylhsminor.yy172 = createNodeListNodeEx(pCxt, createDurationValueNode(pCxt, &yymsp[-2].minor.yy0), createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; - break; - case 105: /* cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */ - case 107: /* cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */ yytestcase(yyruleno==107); -{ pCxt->pRootNode = createCreateTableStmt(pCxt, yymsp[-6].minor.yy617, yymsp[-5].minor.yy172, yymsp[-3].minor.yy60, yymsp[-1].minor.yy60, yymsp[0].minor.yy172); } - break; - case 106: /* cmd ::= CREATE TABLE multi_create_clause */ -{ pCxt->pRootNode = createCreateMultiTableStmt(pCxt, yymsp[0].minor.yy60); } - break; - case 108: /* cmd ::= DROP TABLE multi_drop_clause */ -{ pCxt->pRootNode = createDropTableStmt(pCxt, yymsp[0].minor.yy60); } - break; - case 109: /* cmd ::= DROP 
STABLE exists_opt full_table_name */ -{ pCxt->pRootNode = createDropSuperTableStmt(pCxt, yymsp[-1].minor.yy617, yymsp[0].minor.yy172); } - break; - case 110: /* cmd ::= ALTER TABLE alter_table_clause */ - case 111: /* cmd ::= ALTER STABLE alter_table_clause */ yytestcase(yyruleno==111); - case 263: /* cmd ::= query_expression */ yytestcase(yyruleno==263); -{ pCxt->pRootNode = yymsp[0].minor.yy172; } - break; - case 112: /* alter_table_clause ::= full_table_name alter_table_options */ -{ yylhsminor.yy172 = createAlterTableModifyOptions(pCxt, yymsp[-1].minor.yy172, yymsp[0].minor.yy172); } - yymsp[-1].minor.yy172 = yylhsminor.yy172; - break; - case 113: /* alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */ -{ yylhsminor.yy172 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy172, TSDB_ALTER_TABLE_ADD_COLUMN, &yymsp[-1].minor.yy105, yymsp[0].minor.yy248); } - yymsp[-4].minor.yy172 = yylhsminor.yy172; - break; - case 114: /* alter_table_clause ::= full_table_name DROP COLUMN column_name */ -{ yylhsminor.yy172 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy172, TSDB_ALTER_TABLE_DROP_COLUMN, &yymsp[0].minor.yy105); } - yymsp[-3].minor.yy172 = yylhsminor.yy172; - break; - case 115: /* alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */ -{ yylhsminor.yy172 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy172, TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, &yymsp[-1].minor.yy105, yymsp[0].minor.yy248); } - yymsp[-4].minor.yy172 = yylhsminor.yy172; +{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_RETENTIONS, yymsp[0].minor.yy236); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; + break; + case 87: /* db_options ::= db_options SCHEMALESS NK_INTEGER */ +{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_SCHEMALESS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; + break; + case 88: /* alter_db_options ::= alter_db_option */ +{ yylhsminor.yy636 = createAlterDatabaseOptions(pCxt); yylhsminor.yy636 = setAlterDatabaseOption(pCxt, yylhsminor.yy636, &yymsp[0].minor.yy25); } + yymsp[0].minor.yy636 = yylhsminor.yy636; + break; + case 89: /* alter_db_options ::= alter_db_options alter_db_option */ +{ yylhsminor.yy636 = setAlterDatabaseOption(pCxt, yymsp[-1].minor.yy636, &yymsp[0].minor.yy25); } + yymsp[-1].minor.yy636 = yylhsminor.yy636; + break; + case 90: /* alter_db_option ::= BUFFER NK_INTEGER */ +{ yymsp[-1].minor.yy25.type = DB_OPTION_BUFFER; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } + break; + case 91: /* alter_db_option ::= CACHELAST NK_INTEGER */ +{ yymsp[-1].minor.yy25.type = DB_OPTION_CACHELAST; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } + break; + case 92: /* alter_db_option ::= FSYNC NK_INTEGER */ +{ yymsp[-1].minor.yy25.type = DB_OPTION_FSYNC; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } + break; + case 93: /* alter_db_option ::= KEEP integer_list */ + case 94: /* alter_db_option ::= KEEP variable_list */ yytestcase(yyruleno==94); +{ yymsp[-1].minor.yy25.type = DB_OPTION_KEEP; yymsp[-1].minor.yy25.pList = yymsp[0].minor.yy236; } + break; + case 95: /* alter_db_option ::= PAGES NK_INTEGER */ +{ yymsp[-1].minor.yy25.type = DB_OPTION_PAGES; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } + break; + case 96: /* alter_db_option ::= REPLICA NK_INTEGER */ +{ yymsp[-1].minor.yy25.type = DB_OPTION_REPLICA; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } + break; + case 97: /* alter_db_option ::= STRICT NK_INTEGER */ +{ yymsp[-1].minor.yy25.type = 
DB_OPTION_STRICT; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } + break; + case 98: /* alter_db_option ::= WAL NK_INTEGER */ +{ yymsp[-1].minor.yy25.type = DB_OPTION_WAL; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } + break; + case 99: /* integer_list ::= NK_INTEGER */ +{ yylhsminor.yy236 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy236 = yylhsminor.yy236; + break; + case 100: /* integer_list ::= integer_list NK_COMMA NK_INTEGER */ + case 263: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==263); +{ yylhsminor.yy236 = addNodeToList(pCxt, yymsp[-2].minor.yy236, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } + yymsp[-2].minor.yy236 = yylhsminor.yy236; + break; + case 101: /* variable_list ::= NK_VARIABLE */ +{ yylhsminor.yy236 = createNodeList(pCxt, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy236 = yylhsminor.yy236; + break; + case 102: /* variable_list ::= variable_list NK_COMMA NK_VARIABLE */ +{ yylhsminor.yy236 = addNodeToList(pCxt, yymsp[-2].minor.yy236, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[-2].minor.yy236 = yylhsminor.yy236; + break; + case 103: /* retention_list ::= retention */ + case 123: /* multi_create_clause ::= create_subtable_clause */ yytestcase(yyruleno==123); + case 126: /* multi_drop_clause ::= drop_table_clause */ yytestcase(yyruleno==126); + case 133: /* column_def_list ::= column_def */ yytestcase(yyruleno==133); + case 174: /* col_name_list ::= col_name */ yytestcase(yyruleno==174); + case 212: /* func_name_list ::= func_name */ yytestcase(yyruleno==212); + case 221: /* func_list ::= func */ yytestcase(yyruleno==221); + case 288: /* literal_list ::= signed_literal */ yytestcase(yyruleno==288); + case 342: /* other_para_list ::= star_func_para */ yytestcase(yyruleno==342); + case 397: /* select_sublist ::= select_item */ yytestcase(yyruleno==397); + case 446: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==446); +{ yylhsminor.yy236 = createNodeList(pCxt, yymsp[0].minor.yy636); } + yymsp[0].minor.yy236 = yylhsminor.yy236; + break; + case 104: /* retention_list ::= retention_list NK_COMMA retention */ + case 134: /* column_def_list ::= column_def_list NK_COMMA column_def */ yytestcase(yyruleno==134); + case 175: /* col_name_list ::= col_name_list NK_COMMA col_name */ yytestcase(yyruleno==175); + case 213: /* func_name_list ::= func_name_list NK_COMMA func_name */ yytestcase(yyruleno==213); + case 222: /* func_list ::= func_list NK_COMMA func */ yytestcase(yyruleno==222); + case 289: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==289); + case 343: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==343); + case 398: /* select_sublist ::= select_sublist NK_COMMA select_item */ yytestcase(yyruleno==398); + case 447: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==447); +{ yylhsminor.yy236 = addNodeToList(pCxt, yymsp[-2].minor.yy236, yymsp[0].minor.yy636); } + yymsp[-2].minor.yy236 = yylhsminor.yy236; + break; + case 105: /* retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */ +{ yylhsminor.yy636 = createNodeListNodeEx(pCxt, createDurationValueNode(pCxt, &yymsp[-2].minor.yy0), createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; + break; + case 106: /* cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP 
column_def_list NK_RP tags_def_opt table_options */ + case 108: /* cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */ yytestcase(yyruleno==108); +{ pCxt->pRootNode = createCreateTableStmt(pCxt, yymsp[-6].minor.yy603, yymsp[-5].minor.yy636, yymsp[-3].minor.yy236, yymsp[-1].minor.yy236, yymsp[0].minor.yy636); } + break; + case 107: /* cmd ::= CREATE TABLE multi_create_clause */ +{ pCxt->pRootNode = createCreateMultiTableStmt(pCxt, yymsp[0].minor.yy236); } + break; + case 109: /* cmd ::= DROP TABLE multi_drop_clause */ +{ pCxt->pRootNode = createDropTableStmt(pCxt, yymsp[0].minor.yy236); } + break; + case 110: /* cmd ::= DROP STABLE exists_opt full_table_name */ +{ pCxt->pRootNode = createDropSuperTableStmt(pCxt, yymsp[-1].minor.yy603, yymsp[0].minor.yy636); } + break; + case 111: /* cmd ::= ALTER TABLE alter_table_clause */ + case 112: /* cmd ::= ALTER STABLE alter_table_clause */ yytestcase(yyruleno==112); + case 265: /* cmd ::= query_expression */ yytestcase(yyruleno==265); +{ pCxt->pRootNode = yymsp[0].minor.yy636; } + break; + case 113: /* alter_table_clause ::= full_table_name alter_table_options */ +{ yylhsminor.yy636 = createAlterTableModifyOptions(pCxt, yymsp[-1].minor.yy636, yymsp[0].minor.yy636); } + yymsp[-1].minor.yy636 = yylhsminor.yy636; + break; + case 114: /* alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */ +{ yylhsminor.yy636 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy636, TSDB_ALTER_TABLE_ADD_COLUMN, &yymsp[-1].minor.yy53, yymsp[0].minor.yy450); } + yymsp[-4].minor.yy636 = yylhsminor.yy636; + break; + case 115: /* alter_table_clause ::= full_table_name DROP COLUMN column_name */ +{ yylhsminor.yy636 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy636, TSDB_ALTER_TABLE_DROP_COLUMN, &yymsp[0].minor.yy53); } + yymsp[-3].minor.yy636 = yylhsminor.yy636; + break; + case 116: /* alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */ +{ yylhsminor.yy636 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy636, TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, &yymsp[-1].minor.yy53, yymsp[0].minor.yy450); } + yymsp[-4].minor.yy636 = yylhsminor.yy636; break; - case 116: /* alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */ -{ yylhsminor.yy172 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy172, TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, &yymsp[-1].minor.yy105, &yymsp[0].minor.yy105); } - yymsp[-4].minor.yy172 = yylhsminor.yy172; - break; - case 117: /* alter_table_clause ::= full_table_name ADD TAG column_name type_name */ -{ yylhsminor.yy172 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy172, TSDB_ALTER_TABLE_ADD_TAG, &yymsp[-1].minor.yy105, yymsp[0].minor.yy248); } - yymsp[-4].minor.yy172 = yylhsminor.yy172; - break; - case 118: /* alter_table_clause ::= full_table_name DROP TAG column_name */ -{ yylhsminor.yy172 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy172, TSDB_ALTER_TABLE_DROP_TAG, &yymsp[0].minor.yy105); } - yymsp[-3].minor.yy172 = yylhsminor.yy172; + case 117: /* alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */ +{ yylhsminor.yy636 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy636, TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, &yymsp[-1].minor.yy53, &yymsp[0].minor.yy53); } + yymsp[-4].minor.yy636 = yylhsminor.yy636; + break; + case 118: /* alter_table_clause ::= full_table_name ADD TAG column_name type_name */ +{ yylhsminor.yy636 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy636, 
TSDB_ALTER_TABLE_ADD_TAG, &yymsp[-1].minor.yy53, yymsp[0].minor.yy450); } + yymsp[-4].minor.yy636 = yylhsminor.yy636; + break; + case 119: /* alter_table_clause ::= full_table_name DROP TAG column_name */ +{ yylhsminor.yy636 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy636, TSDB_ALTER_TABLE_DROP_TAG, &yymsp[0].minor.yy53); } + yymsp[-3].minor.yy636 = yylhsminor.yy636; break; - case 119: /* alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */ -{ yylhsminor.yy172 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy172, TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, &yymsp[-1].minor.yy105, yymsp[0].minor.yy248); } - yymsp[-4].minor.yy172 = yylhsminor.yy172; + case 120: /* alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */ +{ yylhsminor.yy636 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy636, TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, &yymsp[-1].minor.yy53, yymsp[0].minor.yy450); } + yymsp[-4].minor.yy636 = yylhsminor.yy636; break; - case 120: /* alter_table_clause ::= full_table_name RENAME TAG column_name column_name */ -{ yylhsminor.yy172 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy172, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &yymsp[-1].minor.yy105, &yymsp[0].minor.yy105); } - yymsp[-4].minor.yy172 = yylhsminor.yy172; - break; - case 121: /* alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */ -{ yylhsminor.yy172 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy172, &yymsp[-2].minor.yy105, yymsp[0].minor.yy172); } - yymsp[-5].minor.yy172 = yylhsminor.yy172; + case 121: /* alter_table_clause ::= full_table_name RENAME TAG column_name column_name */ +{ yylhsminor.yy636 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy636, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &yymsp[-1].minor.yy53, &yymsp[0].minor.yy53); } + yymsp[-4].minor.yy636 = yylhsminor.yy636; + break; + case 122: /* alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */ +{ yylhsminor.yy636 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy636, &yymsp[-2].minor.yy53, yymsp[0].minor.yy636); } + yymsp[-5].minor.yy636 = yylhsminor.yy636; break; - case 123: /* multi_create_clause ::= multi_create_clause create_subtable_clause */ - case 126: /* multi_drop_clause ::= multi_drop_clause drop_table_clause */ yytestcase(yyruleno==126); -{ yylhsminor.yy60 = addNodeToList(pCxt, yymsp[-1].minor.yy60, yymsp[0].minor.yy172); } - yymsp[-1].minor.yy60 = yylhsminor.yy60; + case 124: /* multi_create_clause ::= multi_create_clause create_subtable_clause */ + case 127: /* multi_drop_clause ::= multi_drop_clause drop_table_clause */ yytestcase(yyruleno==127); +{ yylhsminor.yy236 = addNodeToList(pCxt, yymsp[-1].minor.yy236, yymsp[0].minor.yy636); } + yymsp[-1].minor.yy236 = yylhsminor.yy236; break; - case 124: /* create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_tags_opt TAGS NK_LP literal_list NK_RP table_options */ -{ yylhsminor.yy172 = createCreateSubTableClause(pCxt, yymsp[-9].minor.yy617, yymsp[-8].minor.yy172, yymsp[-6].minor.yy172, yymsp[-5].minor.yy60, yymsp[-2].minor.yy60, yymsp[0].minor.yy172); } - yymsp[-9].minor.yy172 = yylhsminor.yy172; + case 125: /* create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_tags_opt TAGS NK_LP literal_list NK_RP table_options */ +{ yylhsminor.yy636 = createCreateSubTableClause(pCxt, yymsp[-9].minor.yy603, yymsp[-8].minor.yy636, yymsp[-6].minor.yy636, yymsp[-5].minor.yy236, yymsp[-2].minor.yy236, yymsp[0].minor.yy636); } + yymsp[-9].minor.yy636 
= yylhsminor.yy636; break; - case 127: /* drop_table_clause ::= exists_opt full_table_name */ -{ yylhsminor.yy172 = createDropTableClause(pCxt, yymsp[-1].minor.yy617, yymsp[0].minor.yy172); } - yymsp[-1].minor.yy172 = yylhsminor.yy172; + case 128: /* drop_table_clause ::= exists_opt full_table_name */ +{ yylhsminor.yy636 = createDropTableClause(pCxt, yymsp[-1].minor.yy603, yymsp[0].minor.yy636); } + yymsp[-1].minor.yy636 = yylhsminor.yy636; break; - case 128: /* specific_tags_opt ::= */ - case 159: /* tags_def_opt ::= */ yytestcase(yyruleno==159); - case 402: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==402); - case 419: /* group_by_clause_opt ::= */ yytestcase(yyruleno==419); - case 431: /* order_by_clause_opt ::= */ yytestcase(yyruleno==431); -{ yymsp[1].minor.yy60 = NULL; } + case 129: /* specific_tags_opt ::= */ + case 160: /* tags_def_opt ::= */ yytestcase(yyruleno==160); + case 405: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==405); + case 422: /* group_by_clause_opt ::= */ yytestcase(yyruleno==422); + case 434: /* order_by_clause_opt ::= */ yytestcase(yyruleno==434); +{ yymsp[1].minor.yy236 = NULL; } break; - case 129: /* specific_tags_opt ::= NK_LP col_name_list NK_RP */ -{ yymsp[-2].minor.yy60 = yymsp[-1].minor.yy60; } + case 130: /* specific_tags_opt ::= NK_LP col_name_list NK_RP */ +{ yymsp[-2].minor.yy236 = yymsp[-1].minor.yy236; } break; - case 130: /* full_table_name ::= table_name */ -{ yylhsminor.yy172 = createRealTableNode(pCxt, NULL, &yymsp[0].minor.yy105, NULL); } - yymsp[0].minor.yy172 = yylhsminor.yy172; + case 131: /* full_table_name ::= table_name */ +{ yylhsminor.yy636 = createRealTableNode(pCxt, NULL, &yymsp[0].minor.yy53, NULL); } + yymsp[0].minor.yy636 = yylhsminor.yy636; break; - case 131: /* full_table_name ::= db_name NK_DOT table_name */ -{ yylhsminor.yy172 = createRealTableNode(pCxt, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy105, NULL); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + case 132: /* full_table_name ::= db_name NK_DOT table_name */ +{ yylhsminor.yy636 = createRealTableNode(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy53, NULL); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 134: /* column_def ::= column_name type_name */ -{ yylhsminor.yy172 = createColumnDefNode(pCxt, &yymsp[-1].minor.yy105, yymsp[0].minor.yy248, NULL); } - yymsp[-1].minor.yy172 = yylhsminor.yy172; + case 135: /* column_def ::= column_name type_name */ +{ yylhsminor.yy636 = createColumnDefNode(pCxt, &yymsp[-1].minor.yy53, yymsp[0].minor.yy450, NULL); } + yymsp[-1].minor.yy636 = yylhsminor.yy636; break; - case 135: /* column_def ::= column_name type_name COMMENT NK_STRING */ -{ yylhsminor.yy172 = createColumnDefNode(pCxt, &yymsp[-3].minor.yy105, yymsp[-2].minor.yy248, &yymsp[0].minor.yy0); } - yymsp[-3].minor.yy172 = yylhsminor.yy172; + case 136: /* column_def ::= column_name type_name COMMENT NK_STRING */ +{ yylhsminor.yy636 = createColumnDefNode(pCxt, &yymsp[-3].minor.yy53, yymsp[-2].minor.yy450, &yymsp[0].minor.yy0); } + yymsp[-3].minor.yy636 = yylhsminor.yy636; break; - case 136: /* type_name ::= BOOL */ -{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_BOOL); } + case 137: /* type_name ::= BOOL */ +{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_BOOL); } break; - case 137: /* type_name ::= TINYINT */ -{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_TINYINT); } + case 138: /* type_name ::= TINYINT */ +{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_TINYINT); } break; - case 138: /* type_name ::= SMALLINT */ -{ 
yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_SMALLINT); } + case 139: /* type_name ::= SMALLINT */ +{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_SMALLINT); } break; - case 139: /* type_name ::= INT */ - case 140: /* type_name ::= INTEGER */ yytestcase(yyruleno==140); -{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_INT); } + case 140: /* type_name ::= INT */ + case 141: /* type_name ::= INTEGER */ yytestcase(yyruleno==141); +{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_INT); } break; - case 141: /* type_name ::= BIGINT */ -{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_BIGINT); } + case 142: /* type_name ::= BIGINT */ +{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_BIGINT); } break; - case 142: /* type_name ::= FLOAT */ -{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_FLOAT); } + case 143: /* type_name ::= FLOAT */ +{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_FLOAT); } break; - case 143: /* type_name ::= DOUBLE */ -{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_DOUBLE); } + case 144: /* type_name ::= DOUBLE */ +{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_DOUBLE); } break; - case 144: /* type_name ::= BINARY NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy248 = createVarLenDataType(TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy0); } + case 145: /* type_name ::= BINARY NK_LP NK_INTEGER NK_RP */ +{ yymsp[-3].minor.yy450 = createVarLenDataType(TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy0); } break; - case 145: /* type_name ::= TIMESTAMP */ -{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_TIMESTAMP); } + case 146: /* type_name ::= TIMESTAMP */ +{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_TIMESTAMP); } break; - case 146: /* type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy248 = createVarLenDataType(TSDB_DATA_TYPE_NCHAR, &yymsp[-1].minor.yy0); } + case 147: /* type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */ +{ yymsp[-3].minor.yy450 = createVarLenDataType(TSDB_DATA_TYPE_NCHAR, &yymsp[-1].minor.yy0); } break; - case 147: /* type_name ::= TINYINT UNSIGNED */ -{ yymsp[-1].minor.yy248 = createDataType(TSDB_DATA_TYPE_UTINYINT); } + case 148: /* type_name ::= TINYINT UNSIGNED */ +{ yymsp[-1].minor.yy450 = createDataType(TSDB_DATA_TYPE_UTINYINT); } break; - case 148: /* type_name ::= SMALLINT UNSIGNED */ -{ yymsp[-1].minor.yy248 = createDataType(TSDB_DATA_TYPE_USMALLINT); } + case 149: /* type_name ::= SMALLINT UNSIGNED */ +{ yymsp[-1].minor.yy450 = createDataType(TSDB_DATA_TYPE_USMALLINT); } break; - case 149: /* type_name ::= INT UNSIGNED */ -{ yymsp[-1].minor.yy248 = createDataType(TSDB_DATA_TYPE_UINT); } + case 150: /* type_name ::= INT UNSIGNED */ +{ yymsp[-1].minor.yy450 = createDataType(TSDB_DATA_TYPE_UINT); } break; - case 150: /* type_name ::= BIGINT UNSIGNED */ -{ yymsp[-1].minor.yy248 = createDataType(TSDB_DATA_TYPE_UBIGINT); } + case 151: /* type_name ::= BIGINT UNSIGNED */ +{ yymsp[-1].minor.yy450 = createDataType(TSDB_DATA_TYPE_UBIGINT); } break; - case 151: /* type_name ::= JSON */ -{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_JSON); } + case 152: /* type_name ::= JSON */ +{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_JSON); } break; - case 152: /* type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy248 = createVarLenDataType(TSDB_DATA_TYPE_VARCHAR, &yymsp[-1].minor.yy0); } + case 153: /* type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */ +{ yymsp[-3].minor.yy450 = createVarLenDataType(TSDB_DATA_TYPE_VARCHAR, &yymsp[-1].minor.yy0); } break; - 
case 153: /* type_name ::= MEDIUMBLOB */ -{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_MEDIUMBLOB); } + case 154: /* type_name ::= MEDIUMBLOB */ +{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_MEDIUMBLOB); } break; - case 154: /* type_name ::= BLOB */ -{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_BLOB); } + case 155: /* type_name ::= BLOB */ +{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_BLOB); } break; - case 155: /* type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy248 = createVarLenDataType(TSDB_DATA_TYPE_VARBINARY, &yymsp[-1].minor.yy0); } + case 156: /* type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */ +{ yymsp[-3].minor.yy450 = createVarLenDataType(TSDB_DATA_TYPE_VARBINARY, &yymsp[-1].minor.yy0); } break; - case 156: /* type_name ::= DECIMAL */ -{ yymsp[0].minor.yy248 = createDataType(TSDB_DATA_TYPE_DECIMAL); } + case 157: /* type_name ::= DECIMAL */ +{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_DECIMAL); } break; - case 157: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy248 = createDataType(TSDB_DATA_TYPE_DECIMAL); } + case 158: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */ +{ yymsp[-3].minor.yy450 = createDataType(TSDB_DATA_TYPE_DECIMAL); } break; - case 158: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */ -{ yymsp[-5].minor.yy248 = createDataType(TSDB_DATA_TYPE_DECIMAL); } + case 159: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */ +{ yymsp[-5].minor.yy450 = createDataType(TSDB_DATA_TYPE_DECIMAL); } break; - case 160: /* tags_def_opt ::= tags_def */ - case 338: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==338); - case 393: /* select_list ::= select_sublist */ yytestcase(yyruleno==393); -{ yylhsminor.yy60 = yymsp[0].minor.yy60; } - yymsp[0].minor.yy60 = yylhsminor.yy60; + case 161: /* tags_def_opt ::= tags_def */ + case 341: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==341); + case 396: /* select_list ::= select_sublist */ yytestcase(yyruleno==396); +{ yylhsminor.yy236 = yymsp[0].minor.yy236; } + yymsp[0].minor.yy236 = yylhsminor.yy236; break; - case 161: /* tags_def ::= TAGS NK_LP column_def_list NK_RP */ -{ yymsp[-3].minor.yy60 = yymsp[-1].minor.yy60; } + case 162: /* tags_def ::= TAGS NK_LP column_def_list NK_RP */ +{ yymsp[-3].minor.yy236 = yymsp[-1].minor.yy236; } break; - case 162: /* table_options ::= */ -{ yymsp[1].minor.yy172 = createDefaultTableOptions(pCxt); } + case 163: /* table_options ::= */ +{ yymsp[1].minor.yy636 = createDefaultTableOptions(pCxt); } break; - case 163: /* table_options ::= table_options COMMENT NK_STRING */ -{ yylhsminor.yy172 = setTableOption(pCxt, yymsp[-2].minor.yy172, TABLE_OPTION_COMMENT, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + case 164: /* table_options ::= table_options COMMENT NK_STRING */ +{ yylhsminor.yy636 = setTableOption(pCxt, yymsp[-2].minor.yy636, TABLE_OPTION_COMMENT, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 164: /* table_options ::= table_options DELAY NK_INTEGER */ -{ yylhsminor.yy172 = setTableOption(pCxt, yymsp[-2].minor.yy172, TABLE_OPTION_DELAY, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + case 165: /* table_options ::= table_options DELAY NK_INTEGER */ +{ yylhsminor.yy636 = setTableOption(pCxt, yymsp[-2].minor.yy636, TABLE_OPTION_DELAY, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 165: /* table_options ::= 
table_options FILE_FACTOR NK_FLOAT */ -{ yylhsminor.yy172 = setTableOption(pCxt, yymsp[-2].minor.yy172, TABLE_OPTION_FILE_FACTOR, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + case 166: /* table_options ::= table_options FILE_FACTOR NK_FLOAT */ +{ yylhsminor.yy636 = setTableOption(pCxt, yymsp[-2].minor.yy636, TABLE_OPTION_FILE_FACTOR, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 166: /* table_options ::= table_options ROLLUP NK_LP func_name_list NK_RP */ -{ yylhsminor.yy172 = setTableOption(pCxt, yymsp[-4].minor.yy172, TABLE_OPTION_ROLLUP, yymsp[-1].minor.yy60); } - yymsp[-4].minor.yy172 = yylhsminor.yy172; + case 167: /* table_options ::= table_options ROLLUP NK_LP func_name_list NK_RP */ +{ yylhsminor.yy636 = setTableOption(pCxt, yymsp[-4].minor.yy636, TABLE_OPTION_ROLLUP, yymsp[-1].minor.yy236); } + yymsp[-4].minor.yy636 = yylhsminor.yy636; break; - case 167: /* table_options ::= table_options TTL NK_INTEGER */ -{ yylhsminor.yy172 = setTableOption(pCxt, yymsp[-2].minor.yy172, TABLE_OPTION_TTL, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + case 168: /* table_options ::= table_options TTL NK_INTEGER */ +{ yylhsminor.yy636 = setTableOption(pCxt, yymsp[-2].minor.yy636, TABLE_OPTION_TTL, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 168: /* table_options ::= table_options SMA NK_LP col_name_list NK_RP */ -{ yylhsminor.yy172 = setTableOption(pCxt, yymsp[-4].minor.yy172, TABLE_OPTION_SMA, yymsp[-1].minor.yy60); } - yymsp[-4].minor.yy172 = yylhsminor.yy172; + case 169: /* table_options ::= table_options SMA NK_LP col_name_list NK_RP */ +{ yylhsminor.yy636 = setTableOption(pCxt, yymsp[-4].minor.yy636, TABLE_OPTION_SMA, yymsp[-1].minor.yy236); } + yymsp[-4].minor.yy636 = yylhsminor.yy636; break; - case 169: /* alter_table_options ::= alter_table_option */ -{ yylhsminor.yy172 = createAlterTableOptions(pCxt); yylhsminor.yy172 = setTableOption(pCxt, yylhsminor.yy172, yymsp[0].minor.yy609.type, &yymsp[0].minor.yy609.val); } - yymsp[0].minor.yy172 = yylhsminor.yy172; + case 170: /* alter_table_options ::= alter_table_option */ +{ yylhsminor.yy636 = createAlterTableOptions(pCxt); yylhsminor.yy636 = setTableOption(pCxt, yylhsminor.yy636, yymsp[0].minor.yy25.type, &yymsp[0].minor.yy25.val); } + yymsp[0].minor.yy636 = yylhsminor.yy636; break; - case 170: /* alter_table_options ::= alter_table_options alter_table_option */ -{ yylhsminor.yy172 = setTableOption(pCxt, yymsp[-1].minor.yy172, yymsp[0].minor.yy609.type, &yymsp[0].minor.yy609.val); } - yymsp[-1].minor.yy172 = yylhsminor.yy172; + case 171: /* alter_table_options ::= alter_table_options alter_table_option */ +{ yylhsminor.yy636 = setTableOption(pCxt, yymsp[-1].minor.yy636, yymsp[0].minor.yy25.type, &yymsp[0].minor.yy25.val); } + yymsp[-1].minor.yy636 = yylhsminor.yy636; break; - case 171: /* alter_table_option ::= COMMENT NK_STRING */ -{ yymsp[-1].minor.yy609.type = TABLE_OPTION_COMMENT; yymsp[-1].minor.yy609.val = yymsp[0].minor.yy0; } + case 172: /* alter_table_option ::= COMMENT NK_STRING */ +{ yymsp[-1].minor.yy25.type = TABLE_OPTION_COMMENT; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } break; - case 172: /* alter_table_option ::= TTL NK_INTEGER */ -{ yymsp[-1].minor.yy609.type = TABLE_OPTION_TTL; yymsp[-1].minor.yy609.val = yymsp[0].minor.yy0; } + case 173: /* alter_table_option ::= TTL NK_INTEGER */ +{ yymsp[-1].minor.yy25.type = TABLE_OPTION_TTL; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } break; - case 175: /* 
col_name ::= column_name */ -{ yylhsminor.yy172 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy105); } - yymsp[0].minor.yy172 = yylhsminor.yy172; + case 176: /* col_name ::= column_name */ +{ yylhsminor.yy636 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy53); } + yymsp[0].minor.yy636 = yylhsminor.yy636; break; - case 176: /* cmd ::= SHOW DNODES */ + case 177: /* cmd ::= SHOW DNODES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DNODES_STMT, NULL, NULL); } break; - case 177: /* cmd ::= SHOW USERS */ + case 178: /* cmd ::= SHOW USERS */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_USERS_STMT, NULL, NULL); } break; - case 178: /* cmd ::= SHOW DATABASES */ + case 179: /* cmd ::= SHOW DATABASES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DATABASES_STMT, NULL, NULL); } break; - case 179: /* cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TABLES_STMT, yymsp[-2].minor.yy172, yymsp[0].minor.yy172); } + case 180: /* cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */ +{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TABLES_STMT, yymsp[-2].minor.yy636, yymsp[0].minor.yy636); } break; - case 180: /* cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STABLES_STMT, yymsp[-2].minor.yy172, yymsp[0].minor.yy172); } + case 181: /* cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */ +{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STABLES_STMT, yymsp[-2].minor.yy636, yymsp[0].minor.yy636); } break; - case 181: /* cmd ::= SHOW db_name_cond_opt VGROUPS */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, yymsp[-1].minor.yy172, NULL); } + case 182: /* cmd ::= SHOW db_name_cond_opt VGROUPS */ +{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, yymsp[-1].minor.yy636, NULL); } break; - case 182: /* cmd ::= SHOW MNODES */ + case 183: /* cmd ::= SHOW MNODES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_MNODES_STMT, NULL, NULL); } break; - case 183: /* cmd ::= SHOW MODULES */ + case 184: /* cmd ::= SHOW MODULES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_MODULES_STMT, NULL, NULL); } break; - case 184: /* cmd ::= SHOW QNODES */ + case 185: /* cmd ::= SHOW QNODES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_QNODES_STMT, NULL, NULL); } break; - case 185: /* cmd ::= SHOW FUNCTIONS */ + case 186: /* cmd ::= SHOW FUNCTIONS */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_FUNCTIONS_STMT, NULL, NULL); } break; - case 186: /* cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, yymsp[-1].minor.yy172, yymsp[0].minor.yy172); } + case 187: /* cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */ +{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, yymsp[-1].minor.yy636, yymsp[0].minor.yy636); } break; - case 187: /* cmd ::= SHOW STREAMS */ + case 188: /* cmd ::= SHOW STREAMS */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STREAMS_STMT, NULL, NULL); } break; - case 188: /* cmd ::= SHOW ACCOUNTS */ + case 189: /* cmd ::= SHOW ACCOUNTS */ { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); } break; - case 189: /* cmd ::= SHOW APPS */ + case 190: /* cmd ::= SHOW APPS */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_APPS_STMT, NULL, NULL); } break; - case 190: /* cmd ::= SHOW 
CONNECTIONS */ + case 191: /* cmd ::= SHOW CONNECTIONS */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CONNECTIONS_STMT, NULL, NULL); } break; - case 191: /* cmd ::= SHOW LICENCE */ - case 192: /* cmd ::= SHOW GRANTS */ yytestcase(yyruleno==192); + case 192: /* cmd ::= SHOW LICENCE */ + case 193: /* cmd ::= SHOW GRANTS */ yytestcase(yyruleno==193); { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_LICENCE_STMT, NULL, NULL); } break; - case 193: /* cmd ::= SHOW CREATE DATABASE db_name */ -{ pCxt->pRootNode = createShowCreateDatabaseStmt(pCxt, &yymsp[0].minor.yy105); } + case 194: /* cmd ::= SHOW CREATE DATABASE db_name */ +{ pCxt->pRootNode = createShowCreateDatabaseStmt(pCxt, &yymsp[0].minor.yy53); } break; - case 194: /* cmd ::= SHOW CREATE TABLE full_table_name */ -{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_TABLE_STMT, yymsp[0].minor.yy172); } + case 195: /* cmd ::= SHOW CREATE TABLE full_table_name */ +{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_TABLE_STMT, yymsp[0].minor.yy636); } break; - case 195: /* cmd ::= SHOW CREATE STABLE full_table_name */ -{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_STABLE_STMT, yymsp[0].minor.yy172); } + case 196: /* cmd ::= SHOW CREATE STABLE full_table_name */ +{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_STABLE_STMT, yymsp[0].minor.yy636); } break; - case 196: /* cmd ::= SHOW QUERIES */ + case 197: /* cmd ::= SHOW QUERIES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_QUERIES_STMT, NULL, NULL); } break; - case 197: /* cmd ::= SHOW SCORES */ + case 198: /* cmd ::= SHOW SCORES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SCORES_STMT, NULL, NULL); } break; - case 198: /* cmd ::= SHOW TOPICS */ + case 199: /* cmd ::= SHOW TOPICS */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TOPICS_STMT, NULL, NULL); } break; - case 199: /* cmd ::= SHOW VARIABLES */ + case 200: /* cmd ::= SHOW VARIABLES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VARIABLE_STMT, NULL, NULL); } break; - case 200: /* cmd ::= SHOW BNODES */ + case 201: /* cmd ::= SHOW BNODES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_BNODES_STMT, NULL, NULL); } break; - case 201: /* cmd ::= SHOW SNODES */ + case 202: /* cmd ::= SHOW SNODES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SNODES_STMT, NULL, NULL); } break; - case 202: /* cmd ::= SHOW CLUSTER */ + case 203: /* cmd ::= SHOW CLUSTER */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CLUSTER_STMT, NULL, NULL); } break; - case 203: /* cmd ::= SHOW TRANSACTIONS */ + case 204: /* cmd ::= SHOW TRANSACTIONS */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TRANSACTIONS_STMT, NULL, NULL); } break; - case 204: /* db_name_cond_opt ::= */ - case 209: /* from_db_opt ::= */ yytestcase(yyruleno==209); -{ yymsp[1].minor.yy172 = createDefaultDatabaseCondValue(pCxt); } + case 205: /* db_name_cond_opt ::= */ + case 210: /* from_db_opt ::= */ yytestcase(yyruleno==210); +{ yymsp[1].minor.yy636 = createDefaultDatabaseCondValue(pCxt); } + break; + case 206: /* db_name_cond_opt ::= db_name NK_DOT */ +{ yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy53); } + yymsp[-1].minor.yy636 = yylhsminor.yy636; break; - case 205: /* db_name_cond_opt ::= db_name NK_DOT */ -{ yylhsminor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy105); } - yymsp[-1].minor.yy172 = yylhsminor.yy172; + 
case 207: /* like_pattern_opt ::= */ + case 218: /* index_options ::= */ yytestcase(yyruleno==218); + case 250: /* into_opt ::= */ yytestcase(yyruleno==250); + case 403: /* where_clause_opt ::= */ yytestcase(yyruleno==403); + case 407: /* twindow_clause_opt ::= */ yytestcase(yyruleno==407); + case 412: /* sliding_opt ::= */ yytestcase(yyruleno==412); + case 414: /* fill_opt ::= */ yytestcase(yyruleno==414); + case 426: /* having_clause_opt ::= */ yytestcase(yyruleno==426); + case 436: /* slimit_clause_opt ::= */ yytestcase(yyruleno==436); + case 440: /* limit_clause_opt ::= */ yytestcase(yyruleno==440); +{ yymsp[1].minor.yy636 = NULL; } break; - case 206: /* like_pattern_opt ::= */ - case 217: /* index_options ::= */ yytestcase(yyruleno==217); - case 248: /* into_opt ::= */ yytestcase(yyruleno==248); - case 400: /* where_clause_opt ::= */ yytestcase(yyruleno==400); - case 404: /* twindow_clause_opt ::= */ yytestcase(yyruleno==404); - case 409: /* sliding_opt ::= */ yytestcase(yyruleno==409); - case 411: /* fill_opt ::= */ yytestcase(yyruleno==411); - case 423: /* having_clause_opt ::= */ yytestcase(yyruleno==423); - case 433: /* slimit_clause_opt ::= */ yytestcase(yyruleno==433); - case 437: /* limit_clause_opt ::= */ yytestcase(yyruleno==437); -{ yymsp[1].minor.yy172 = NULL; } + case 208: /* like_pattern_opt ::= LIKE NK_STRING */ +{ yymsp[-1].minor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); } break; - case 207: /* like_pattern_opt ::= LIKE NK_STRING */ -{ yymsp[-1].minor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); } + case 209: /* table_name_cond ::= table_name */ +{ yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy53); } + yymsp[0].minor.yy636 = yylhsminor.yy636; break; - case 208: /* table_name_cond ::= table_name */ -{ yylhsminor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy105); } - yymsp[0].minor.yy172 = yylhsminor.yy172; + case 211: /* from_db_opt ::= FROM db_name */ +{ yymsp[-1].minor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy53); } break; - case 210: /* from_db_opt ::= FROM db_name */ -{ yymsp[-1].minor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy105); } + case 214: /* func_name ::= function_name */ +{ yylhsminor.yy636 = createFunctionNode(pCxt, &yymsp[0].minor.yy53, NULL); } + yymsp[0].minor.yy636 = yylhsminor.yy636; break; - case 213: /* func_name ::= function_name */ -{ yylhsminor.yy172 = createFunctionNode(pCxt, &yymsp[0].minor.yy105, NULL); } - yymsp[0].minor.yy172 = yylhsminor.yy172; + case 215: /* cmd ::= CREATE SMA INDEX not_exists_opt index_name ON table_name index_options */ +{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, yymsp[-4].minor.yy603, &yymsp[-3].minor.yy53, &yymsp[-1].minor.yy53, NULL, yymsp[0].minor.yy636); } break; - case 214: /* cmd ::= CREATE SMA INDEX not_exists_opt index_name ON table_name index_options */ -{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, yymsp[-4].minor.yy617, &yymsp[-3].minor.yy105, &yymsp[-1].minor.yy105, NULL, yymsp[0].minor.yy172); } + case 216: /* cmd ::= CREATE FULLTEXT INDEX not_exists_opt index_name ON table_name NK_LP col_name_list NK_RP */ +{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_FULLTEXT, yymsp[-6].minor.yy603, &yymsp[-5].minor.yy53, &yymsp[-3].minor.yy53, yymsp[-1].minor.yy236, NULL); } break; - case 215: /* cmd ::= CREATE FULLTEXT INDEX not_exists_opt index_name ON table_name NK_LP col_name_list 
NK_RP */ -{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_FULLTEXT, yymsp[-6].minor.yy617, &yymsp[-5].minor.yy105, &yymsp[-3].minor.yy105, yymsp[-1].minor.yy60, NULL); } + case 217: /* cmd ::= DROP INDEX exists_opt index_name ON table_name */ +{ pCxt->pRootNode = createDropIndexStmt(pCxt, yymsp[-3].minor.yy603, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy53); } break; - case 216: /* cmd ::= DROP INDEX exists_opt index_name ON table_name */ -{ pCxt->pRootNode = createDropIndexStmt(pCxt, yymsp[-3].minor.yy617, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy105); } + case 219: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt */ +{ yymsp[-8].minor.yy636 = createIndexOption(pCxt, yymsp[-6].minor.yy236, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), NULL, yymsp[0].minor.yy636); } break; - case 218: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt */ -{ yymsp[-8].minor.yy172 = createIndexOption(pCxt, yymsp[-6].minor.yy60, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), NULL, yymsp[0].minor.yy172); } + case 220: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt */ +{ yymsp[-10].minor.yy636 = createIndexOption(pCxt, yymsp[-8].minor.yy236, releaseRawExprNode(pCxt, yymsp[-4].minor.yy636), releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), yymsp[0].minor.yy636); } break; - case 219: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt */ -{ yymsp[-10].minor.yy172 = createIndexOption(pCxt, yymsp[-8].minor.yy60, releaseRawExprNode(pCxt, yymsp[-4].minor.yy172), releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), yymsp[0].minor.yy172); } + case 223: /* func ::= function_name NK_LP expression_list NK_RP */ +{ yylhsminor.yy636 = createFunctionNode(pCxt, &yymsp[-3].minor.yy53, yymsp[-1].minor.yy236); } + yymsp[-3].minor.yy636 = yylhsminor.yy636; break; - case 222: /* func ::= function_name NK_LP expression_list NK_RP */ -{ yylhsminor.yy172 = createFunctionNode(pCxt, &yymsp[-3].minor.yy105, yymsp[-1].minor.yy60); } - yymsp[-3].minor.yy172 = yylhsminor.yy172; + case 224: /* cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS query_expression */ +{ pCxt->pRootNode = createCreateTopicStmt(pCxt, yymsp[-4].minor.yy603, &yymsp[-3].minor.yy53, yymsp[0].minor.yy636, NULL, yymsp[-2].minor.yy636); } break; - case 223: /* cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS query_expression */ -{ pCxt->pRootNode = createCreateTopicStmt(pCxt, yymsp[-4].minor.yy617, &yymsp[-3].minor.yy105, yymsp[0].minor.yy172, NULL, yymsp[-2].minor.yy172); } + case 225: /* cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS db_name */ +{ pCxt->pRootNode = createCreateTopicStmt(pCxt, yymsp[-4].minor.yy603, &yymsp[-3].minor.yy53, NULL, &yymsp[0].minor.yy53, yymsp[-2].minor.yy636); } break; - case 224: /* cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS db_name */ -{ pCxt->pRootNode = createCreateTopicStmt(pCxt, yymsp[-4].minor.yy617, &yymsp[-3].minor.yy105, NULL, &yymsp[0].minor.yy105, yymsp[-2].minor.yy172); } + case 226: /* cmd ::= DROP TOPIC exists_opt topic_name */ +{ pCxt->pRootNode = createDropTopicStmt(pCxt, yymsp[-1].minor.yy603, &yymsp[0].minor.yy53); } break; - case 225: /* cmd ::= DROP TOPIC exists_opt topic_name */ -{ pCxt->pRootNode = createDropTopicStmt(pCxt, yymsp[-1].minor.yy617, &yymsp[0].minor.yy105); } + case 227: /* 
cmd ::= DROP CGROUP exists_opt cgroup_name ON topic_name */ +{ pCxt->pRootNode = createDropCGroupStmt(pCxt, yymsp[-3].minor.yy603, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy53); } break; - case 226: /* topic_options ::= */ -{ yymsp[1].minor.yy172 = createTopicOptions(pCxt); } + case 228: /* topic_options ::= */ +{ yymsp[1].minor.yy636 = createTopicOptions(pCxt); } break; - case 227: /* topic_options ::= topic_options WITH TABLE */ -{ ((STopicOptions*)yymsp[-2].minor.yy172)->withTable = true; yylhsminor.yy172 = yymsp[-2].minor.yy172; } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + case 229: /* topic_options ::= topic_options WITH TABLE */ +{ ((STopicOptions*)yymsp[-2].minor.yy636)->withTable = true; yylhsminor.yy636 = yymsp[-2].minor.yy636; } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 228: /* topic_options ::= topic_options WITH SCHEMA */ -{ ((STopicOptions*)yymsp[-2].minor.yy172)->withSchema = true; yylhsminor.yy172 = yymsp[-2].minor.yy172; } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + case 230: /* topic_options ::= topic_options WITH SCHEMA */ +{ ((STopicOptions*)yymsp[-2].minor.yy636)->withSchema = true; yylhsminor.yy636 = yymsp[-2].minor.yy636; } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 229: /* topic_options ::= topic_options WITH TAG */ -{ ((STopicOptions*)yymsp[-2].minor.yy172)->withTag = true; yylhsminor.yy172 = yymsp[-2].minor.yy172; } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + case 231: /* topic_options ::= topic_options WITH TAG */ +{ ((STopicOptions*)yymsp[-2].minor.yy636)->withTag = true; yylhsminor.yy636 = yymsp[-2].minor.yy636; } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 230: /* cmd ::= DESC full_table_name */ - case 231: /* cmd ::= DESCRIBE full_table_name */ yytestcase(yyruleno==231); -{ pCxt->pRootNode = createDescribeStmt(pCxt, yymsp[0].minor.yy172); } + case 232: /* cmd ::= DESC full_table_name */ + case 233: /* cmd ::= DESCRIBE full_table_name */ yytestcase(yyruleno==233); +{ pCxt->pRootNode = createDescribeStmt(pCxt, yymsp[0].minor.yy636); } break; - case 232: /* cmd ::= RESET QUERY CACHE */ + case 234: /* cmd ::= RESET QUERY CACHE */ { pCxt->pRootNode = createResetQueryCacheStmt(pCxt); } break; - case 233: /* cmd ::= EXPLAIN analyze_opt explain_options query_expression */ -{ pCxt->pRootNode = createExplainStmt(pCxt, yymsp[-2].minor.yy617, yymsp[-1].minor.yy172, yymsp[0].minor.yy172); } + case 235: /* cmd ::= EXPLAIN analyze_opt explain_options query_expression */ +{ pCxt->pRootNode = createExplainStmt(pCxt, yymsp[-2].minor.yy603, yymsp[-1].minor.yy636, yymsp[0].minor.yy636); } break; - case 235: /* analyze_opt ::= ANALYZE */ - case 243: /* agg_func_opt ::= AGGREGATE */ yytestcase(yyruleno==243); - case 390: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==390); -{ yymsp[0].minor.yy617 = true; } + case 237: /* analyze_opt ::= ANALYZE */ + case 245: /* agg_func_opt ::= AGGREGATE */ yytestcase(yyruleno==245); + case 393: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==393); +{ yymsp[0].minor.yy603 = true; } break; - case 236: /* explain_options ::= */ -{ yymsp[1].minor.yy172 = createDefaultExplainOptions(pCxt); } + case 238: /* explain_options ::= */ +{ yymsp[1].minor.yy636 = createDefaultExplainOptions(pCxt); } break; - case 237: /* explain_options ::= explain_options VERBOSE NK_BOOL */ -{ yylhsminor.yy172 = setExplainVerbose(pCxt, yymsp[-2].minor.yy172, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + case 239: /* explain_options ::= explain_options VERBOSE NK_BOOL */ +{ 
yylhsminor.yy636 = setExplainVerbose(pCxt, yymsp[-2].minor.yy636, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 238: /* explain_options ::= explain_options RATIO NK_FLOAT */ -{ yylhsminor.yy172 = setExplainRatio(pCxt, yymsp[-2].minor.yy172, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + case 240: /* explain_options ::= explain_options RATIO NK_FLOAT */ +{ yylhsminor.yy636 = setExplainRatio(pCxt, yymsp[-2].minor.yy636, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 239: /* cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP */ -{ pCxt->pRootNode = createCompactStmt(pCxt, yymsp[-1].minor.yy60); } + case 241: /* cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP */ +{ pCxt->pRootNode = createCompactStmt(pCxt, yymsp[-1].minor.yy236); } break; - case 240: /* cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */ -{ pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-6].minor.yy617, yymsp[-8].minor.yy617, &yymsp[-5].minor.yy105, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy248, yymsp[0].minor.yy140); } + case 242: /* cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */ +{ pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-6].minor.yy603, yymsp[-8].minor.yy603, &yymsp[-5].minor.yy53, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy450, yymsp[0].minor.yy158); } break; - case 241: /* cmd ::= DROP FUNCTION exists_opt function_name */ -{ pCxt->pRootNode = createDropFunctionStmt(pCxt, yymsp[-1].minor.yy617, &yymsp[0].minor.yy105); } + case 243: /* cmd ::= DROP FUNCTION exists_opt function_name */ +{ pCxt->pRootNode = createDropFunctionStmt(pCxt, yymsp[-1].minor.yy603, &yymsp[0].minor.yy53); } break; - case 244: /* bufsize_opt ::= */ -{ yymsp[1].minor.yy140 = 0; } + case 246: /* bufsize_opt ::= */ +{ yymsp[1].minor.yy158 = 0; } break; - case 245: /* bufsize_opt ::= BUFSIZE NK_INTEGER */ -{ yymsp[-1].minor.yy140 = taosStr2Int32(yymsp[0].minor.yy0.z, NULL, 10); } + case 247: /* bufsize_opt ::= BUFSIZE NK_INTEGER */ +{ yymsp[-1].minor.yy158 = taosStr2Int32(yymsp[0].minor.yy0.z, NULL, 10); } break; - case 246: /* cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression */ -{ pCxt->pRootNode = createCreateStreamStmt(pCxt, yymsp[-5].minor.yy617, &yymsp[-4].minor.yy105, yymsp[-2].minor.yy172, yymsp[-3].minor.yy172, yymsp[0].minor.yy172); } + case 248: /* cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression */ +{ pCxt->pRootNode = createCreateStreamStmt(pCxt, yymsp[-5].minor.yy603, &yymsp[-4].minor.yy53, yymsp[-2].minor.yy636, yymsp[-3].minor.yy636, yymsp[0].minor.yy636); } break; - case 247: /* cmd ::= DROP STREAM exists_opt stream_name */ -{ pCxt->pRootNode = createDropStreamStmt(pCxt, yymsp[-1].minor.yy617, &yymsp[0].minor.yy105); } + case 249: /* cmd ::= DROP STREAM exists_opt stream_name */ +{ pCxt->pRootNode = createDropStreamStmt(pCxt, yymsp[-1].minor.yy603, &yymsp[0].minor.yy53); } break; - case 249: /* into_opt ::= INTO full_table_name */ - case 371: /* from_clause ::= FROM table_reference_list */ yytestcase(yyruleno==371); - case 401: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==401); - case 424: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==424); -{ yymsp[-1].minor.yy172 = yymsp[0].minor.yy172; } + case 251: /* into_opt ::= INTO full_table_name */ + case 374: /* 
from_clause ::= FROM table_reference_list */ yytestcase(yyruleno==374); + case 404: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==404); + case 427: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==427); +{ yymsp[-1].minor.yy636 = yymsp[0].minor.yy636; } break; - case 250: /* stream_options ::= */ -{ yymsp[1].minor.yy172 = createStreamOptions(pCxt); } + case 252: /* stream_options ::= */ +{ yymsp[1].minor.yy636 = createStreamOptions(pCxt); } break; - case 251: /* stream_options ::= stream_options TRIGGER AT_ONCE */ -{ ((SStreamOptions*)yymsp[-2].minor.yy172)->triggerType = STREAM_TRIGGER_AT_ONCE; yylhsminor.yy172 = yymsp[-2].minor.yy172; } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + case 253: /* stream_options ::= stream_options TRIGGER AT_ONCE */ +{ ((SStreamOptions*)yymsp[-2].minor.yy636)->triggerType = STREAM_TRIGGER_AT_ONCE; yylhsminor.yy636 = yymsp[-2].minor.yy636; } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 252: /* stream_options ::= stream_options TRIGGER WINDOW_CLOSE */ -{ ((SStreamOptions*)yymsp[-2].minor.yy172)->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; yylhsminor.yy172 = yymsp[-2].minor.yy172; } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + case 254: /* stream_options ::= stream_options TRIGGER WINDOW_CLOSE */ +{ ((SStreamOptions*)yymsp[-2].minor.yy636)->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; yylhsminor.yy636 = yymsp[-2].minor.yy636; } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 253: /* stream_options ::= stream_options WATERMARK duration_literal */ -{ ((SStreamOptions*)yymsp[-2].minor.yy172)->pWatermark = releaseRawExprNode(pCxt, yymsp[0].minor.yy172); yylhsminor.yy172 = yymsp[-2].minor.yy172; } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + case 255: /* stream_options ::= stream_options WATERMARK duration_literal */ +{ ((SStreamOptions*)yymsp[-2].minor.yy636)->pWatermark = releaseRawExprNode(pCxt, yymsp[0].minor.yy636); yylhsminor.yy636 = yymsp[-2].minor.yy636; } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 254: /* cmd ::= KILL CONNECTION NK_INTEGER */ + case 256: /* cmd ::= KILL CONNECTION NK_INTEGER */ { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_CONNECTION_STMT, &yymsp[0].minor.yy0); } break; - case 255: /* cmd ::= KILL QUERY NK_INTEGER */ + case 257: /* cmd ::= KILL QUERY NK_INTEGER */ { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_QUERY_STMT, &yymsp[0].minor.yy0); } break; - case 256: /* cmd ::= KILL TRANSACTION NK_INTEGER */ + case 258: /* cmd ::= KILL TRANSACTION NK_INTEGER */ { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_TRANSACTION_STMT, &yymsp[0].minor.yy0); } break; - case 257: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ + case 259: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ { pCxt->pRootNode = createMergeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; - case 258: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ -{ pCxt->pRootNode = createRedistributeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy60); } + case 260: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ +{ pCxt->pRootNode = createRedistributeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy236); } break; - case 259: /* cmd ::= SPLIT VGROUP NK_INTEGER */ + case 261: /* cmd ::= SPLIT VGROUP NK_INTEGER */ { pCxt->pRootNode = createSplitVgroupStmt(pCxt, &yymsp[0].minor.yy0); } break; - case 260: /* dnode_list ::= DNODE NK_INTEGER */ -{ yymsp[-1].minor.yy60 = createNodeList(pCxt, createValueNode(pCxt, 
TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } - break; - case 262: /* cmd ::= SYNCDB db_name REPLICA */ -{ pCxt->pRootNode = createSyncdbStmt(pCxt, &yymsp[-1].minor.yy105); } - break; - case 264: /* literal ::= NK_INTEGER */ -{ yylhsminor.yy172 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy172 = yylhsminor.yy172; - break; - case 265: /* literal ::= NK_FLOAT */ -{ yylhsminor.yy172 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy172 = yylhsminor.yy172; - break; - case 266: /* literal ::= NK_STRING */ -{ yylhsminor.yy172 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy172 = yylhsminor.yy172; - break; - case 267: /* literal ::= NK_BOOL */ -{ yylhsminor.yy172 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy172 = yylhsminor.yy172; - break; - case 268: /* literal ::= TIMESTAMP NK_STRING */ -{ yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0)); } - yymsp[-1].minor.yy172 = yylhsminor.yy172; - break; - case 269: /* literal ::= duration_literal */ - case 279: /* signed_literal ::= signed */ yytestcase(yyruleno==279); - case 298: /* expression ::= literal */ yytestcase(yyruleno==298); - case 299: /* expression ::= pseudo_column */ yytestcase(yyruleno==299); - case 300: /* expression ::= column_reference */ yytestcase(yyruleno==300); - case 301: /* expression ::= function_expression */ yytestcase(yyruleno==301); - case 302: /* expression ::= subquery */ yytestcase(yyruleno==302); - case 327: /* function_expression ::= literal_func */ yytestcase(yyruleno==327); - case 363: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==363); - case 367: /* boolean_primary ::= predicate */ yytestcase(yyruleno==367); - case 369: /* common_expression ::= expression */ yytestcase(yyruleno==369); - case 370: /* common_expression ::= boolean_value_expression */ yytestcase(yyruleno==370); - case 372: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==372); - case 374: /* table_reference ::= table_primary */ yytestcase(yyruleno==374); - case 375: /* table_reference ::= joined_table */ yytestcase(yyruleno==375); - case 379: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==379); - case 426: /* query_expression_body ::= query_primary */ yytestcase(yyruleno==426); - case 429: /* query_primary ::= query_specification */ yytestcase(yyruleno==429); -{ yylhsminor.yy172 = yymsp[0].minor.yy172; } - yymsp[0].minor.yy172 = yylhsminor.yy172; - break; - case 270: /* literal ::= NULL */ -{ yylhsminor.yy172 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy172 = yylhsminor.yy172; - break; - case 271: /* literal ::= NK_QUESTION */ -{ yylhsminor.yy172 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy172 = yylhsminor.yy172; - break; - case 272: /* duration_literal ::= NK_VARIABLE */ -{ yylhsminor.yy172 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy172 = yylhsminor.yy172; - break; - case 273: /* 
signed ::= NK_INTEGER */ -{ yylhsminor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy172 = yylhsminor.yy172; - break; - case 274: /* signed ::= NK_PLUS NK_INTEGER */ -{ yymsp[-1].minor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0); } - break; - case 275: /* signed ::= NK_MINUS NK_INTEGER */ + case 262: /* dnode_list ::= DNODE NK_INTEGER */ +{ yymsp[-1].minor.yy236 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } + break; + case 264: /* cmd ::= SYNCDB db_name REPLICA */ +{ pCxt->pRootNode = createSyncdbStmt(pCxt, &yymsp[-1].minor.yy53); } + break; + case 266: /* literal ::= NK_INTEGER */ +{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy636 = yylhsminor.yy636; + break; + case 267: /* literal ::= NK_FLOAT */ +{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy636 = yylhsminor.yy636; + break; + case 268: /* literal ::= NK_STRING */ +{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy636 = yylhsminor.yy636; + break; + case 269: /* literal ::= NK_BOOL */ +{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy636 = yylhsminor.yy636; + break; + case 270: /* literal ::= TIMESTAMP NK_STRING */ +{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0)); } + yymsp[-1].minor.yy636 = yylhsminor.yy636; + break; + case 271: /* literal ::= duration_literal */ + case 281: /* signed_literal ::= signed */ yytestcase(yyruleno==281); + case 301: /* expression ::= literal */ yytestcase(yyruleno==301); + case 302: /* expression ::= pseudo_column */ yytestcase(yyruleno==302); + case 303: /* expression ::= column_reference */ yytestcase(yyruleno==303); + case 304: /* expression ::= function_expression */ yytestcase(yyruleno==304); + case 305: /* expression ::= subquery */ yytestcase(yyruleno==305); + case 330: /* function_expression ::= literal_func */ yytestcase(yyruleno==330); + case 366: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==366); + case 370: /* boolean_primary ::= predicate */ yytestcase(yyruleno==370); + case 372: /* common_expression ::= expression */ yytestcase(yyruleno==372); + case 373: /* common_expression ::= boolean_value_expression */ yytestcase(yyruleno==373); + case 375: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==375); + case 377: /* table_reference ::= table_primary */ yytestcase(yyruleno==377); + case 378: /* table_reference ::= joined_table */ yytestcase(yyruleno==378); + case 382: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==382); + case 429: /* query_expression_body ::= query_primary */ yytestcase(yyruleno==429); + case 432: /* query_primary ::= query_specification */ yytestcase(yyruleno==432); +{ yylhsminor.yy636 = yymsp[0].minor.yy636; } + yymsp[0].minor.yy636 = yylhsminor.yy636; + break; + case 272: /* literal ::= NULL */ +{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy636 = 
yylhsminor.yy636; + break; + case 273: /* literal ::= NK_QUESTION */ +{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy636 = yylhsminor.yy636; + break; + case 274: /* duration_literal ::= NK_VARIABLE */ +{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy636 = yylhsminor.yy636; + break; + case 275: /* signed ::= NK_INTEGER */ +{ yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy636 = yylhsminor.yy636; + break; + case 276: /* signed ::= NK_PLUS NK_INTEGER */ +{ yymsp[-1].minor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0); } + break; + case 277: /* signed ::= NK_MINUS NK_INTEGER */ { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; - yylhsminor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &t); + yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &t); } - yymsp[-1].minor.yy172 = yylhsminor.yy172; + yymsp[-1].minor.yy636 = yylhsminor.yy636; break; - case 276: /* signed ::= NK_FLOAT */ -{ yylhsminor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy172 = yylhsminor.yy172; + case 278: /* signed ::= NK_FLOAT */ +{ yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy636 = yylhsminor.yy636; break; - case 277: /* signed ::= NK_PLUS NK_FLOAT */ -{ yymsp[-1].minor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } + case 279: /* signed ::= NK_PLUS NK_FLOAT */ +{ yymsp[-1].minor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } break; - case 278: /* signed ::= NK_MINUS NK_FLOAT */ + case 280: /* signed ::= NK_MINUS NK_FLOAT */ { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; - yylhsminor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t); + yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t); } - yymsp[-1].minor.yy172 = yylhsminor.yy172; + yymsp[-1].minor.yy636 = yylhsminor.yy636; break; - case 280: /* signed_literal ::= NK_STRING */ -{ yylhsminor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy172 = yylhsminor.yy172; + case 282: /* signed_literal ::= NK_STRING */ +{ yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy636 = yylhsminor.yy636; break; - case 281: /* signed_literal ::= NK_BOOL */ -{ yylhsminor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy172 = yylhsminor.yy172; + case 283: /* signed_literal ::= NK_BOOL */ +{ yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy636 = yylhsminor.yy636; break; - case 282: /* signed_literal ::= TIMESTAMP NK_STRING */ -{ yymsp[-1].minor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); } + case 284: /* signed_literal ::= TIMESTAMP NK_STRING */ +{ yymsp[-1].minor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); } break; - case 283: /* signed_literal ::= duration_literal */ - case 285: /* signed_literal ::= literal_func */ yytestcase(yyruleno==285); - case 341: /* star_func_para ::= expression */ yytestcase(yyruleno==341); - case 
396: /* select_item ::= common_expression */ yytestcase(yyruleno==396); - case 442: /* search_condition ::= common_expression */ yytestcase(yyruleno==442); -{ yylhsminor.yy172 = releaseRawExprNode(pCxt, yymsp[0].minor.yy172); } - yymsp[0].minor.yy172 = yylhsminor.yy172; + case 285: /* signed_literal ::= duration_literal */ + case 287: /* signed_literal ::= literal_func */ yytestcase(yyruleno==287); + case 344: /* star_func_para ::= expression */ yytestcase(yyruleno==344); + case 399: /* select_item ::= common_expression */ yytestcase(yyruleno==399); + case 445: /* search_condition ::= common_expression */ yytestcase(yyruleno==445); +{ yylhsminor.yy636 = releaseRawExprNode(pCxt, yymsp[0].minor.yy636); } + yymsp[0].minor.yy636 = yylhsminor.yy636; break; - case 284: /* signed_literal ::= NULL */ -{ yylhsminor.yy172 = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy172 = yylhsminor.yy172; + case 286: /* signed_literal ::= NULL */ +{ yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy636 = yylhsminor.yy636; break; - case 303: /* expression ::= NK_LP expression NK_RP */ - case 368: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==368); -{ yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy172)); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + case 306: /* expression ::= NK_LP expression NK_RP */ + case 371: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==371); +{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy636)); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 304: /* expression ::= NK_PLUS expression */ + case 307: /* expression ::= NK_PLUS expression */ { - SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172); - yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy172)); + SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); + yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy636)); } - yymsp[-1].minor.yy172 = yylhsminor.yy172; + yymsp[-1].minor.yy636 = yylhsminor.yy636; break; - case 305: /* expression ::= NK_MINUS expression */ + case 308: /* expression ::= NK_MINUS expression */ { - SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172); - yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, yymsp[0].minor.yy172), NULL)); + SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); + yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, yymsp[0].minor.yy636), NULL)); } - yymsp[-1].minor.yy172 = yylhsminor.yy172; + yymsp[-1].minor.yy636 = yylhsminor.yy636; break; - case 306: /* expression ::= expression NK_PLUS expression */ + case 309: /* expression ::= expression NK_PLUS expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy172); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172); - yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), releaseRawExprNode(pCxt, yymsp[0].minor.yy172))); + SToken s = 
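/* A note on the churn in this hunk: sql.c is generated by Lemon from sql.y, and the
 * yyNNN fields (yy172, yy636, yy60, yy236, ...) are members of the generated
 * YYMINORTYPE union, one per distinct semantic-value type in the grammar, roughly:
 *
 *   typedef union {
 *     SToken     yy0;    // every terminal token
 *     SNode*     yy636;  // nonterminals carrying an SNode* (yy172 before regeneration)
 *     SNodeList* yy236;  // nonterminals carrying an SNodeList* (previously yy60)
 *     ...
 *   } YYMINORTYPE;
 *
 * Adding rules (for example the NK_QUESTION placeholder literal above) renumbers both
 * the rule cases and the union members, so every semantic action is rewritten here
 * even though most of them are semantically unchanged.
 */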
getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); + yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 307: /* expression ::= expression NK_MINUS expression */ + case 310: /* expression ::= expression NK_MINUS expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy172); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172); - yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_SUB, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), releaseRawExprNode(pCxt, yymsp[0].minor.yy172))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); + yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_SUB, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 308: /* expression ::= expression NK_STAR expression */ + case 311: /* expression ::= expression NK_STAR expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy172); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172); - yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MULTI, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), releaseRawExprNode(pCxt, yymsp[0].minor.yy172))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); + yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MULTI, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 309: /* expression ::= expression NK_SLASH expression */ + case 312: /* expression ::= expression NK_SLASH expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy172); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172); - yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_DIV, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), releaseRawExprNode(pCxt, yymsp[0].minor.yy172))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); + yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_DIV, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 310: /* expression ::= expression NK_REM expression */ + case 313: /* expression ::= expression NK_REM expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy172); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172); - yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MOD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), releaseRawExprNode(pCxt, yymsp[0].minor.yy172))); + SToken s = getTokenFromRawExprNode(pCxt, 
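/* The binary arithmetic reductions in this stretch all follow one pattern: each
 * operand is a "raw expr" wrapper that remembers the span of its source text, so the
 * action recovers both spans and wraps the new operator node in a raw expr covering
 * the whole `lhs op rhs` text. With lhs/rhs standing in for the yymsp[...] slots:
 *
 *   SToken s = getTokenFromRawExprNode(pCxt, lhs);   // where the left operand's text starts
 *   SToken e = getTokenFromRawExprNode(pCxt, rhs);   // where the right operand's text ends
 *   result = createRawExprNodeExt(pCxt, &s, &e,
 *       createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, lhs),
 *                                             releaseRawExprNode(pCxt, rhs)));
 *
 * Keeping the source span presumably lets later phases derive implicit column aliases
 * and error messages from the original statement text.
 */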
yymsp[-2].minor.yy636); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); + yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MOD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 311: /* expression ::= column_reference NK_ARROW NK_STRING */ + case 314: /* expression ::= column_reference NK_ARROW NK_STRING */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy172); - yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636); + yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0))); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; - break; - case 312: /* expression_list ::= expression */ -{ yylhsminor.yy60 = createNodeList(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy172)); } - yymsp[0].minor.yy60 = yylhsminor.yy60; - break; - case 313: /* expression_list ::= expression_list NK_COMMA expression */ -{ yylhsminor.yy60 = addNodeToList(pCxt, yymsp[-2].minor.yy60, releaseRawExprNode(pCxt, yymsp[0].minor.yy172)); } - yymsp[-2].minor.yy60 = yylhsminor.yy60; - break; - case 314: /* column_reference ::= column_name */ -{ yylhsminor.yy172 = createRawExprNode(pCxt, &yymsp[0].minor.yy105, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy105)); } - yymsp[0].minor.yy172 = yylhsminor.yy172; - break; - case 315: /* column_reference ::= table_name NK_DOT column_name */ -{ yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy105, createColumnNode(pCxt, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy105)); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; - break; - case 316: /* pseudo_column ::= ROWTS */ - case 317: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==317); - case 319: /* pseudo_column ::= QSTARTTS */ yytestcase(yyruleno==319); - case 320: /* pseudo_column ::= QENDTS */ yytestcase(yyruleno==320); - case 321: /* pseudo_column ::= WSTARTTS */ yytestcase(yyruleno==321); - case 322: /* pseudo_column ::= WENDTS */ yytestcase(yyruleno==322); - case 323: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==323); - case 329: /* literal_func ::= NOW */ yytestcase(yyruleno==329); -{ yylhsminor.yy172 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL)); } - yymsp[0].minor.yy172 = yylhsminor.yy172; - break; - case 318: /* pseudo_column ::= table_name NK_DOT TBNAME */ -{ yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-2].minor.yy105)))); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; - break; - case 324: /* function_expression ::= function_name NK_LP expression_list NK_RP */ - case 325: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==325); -{ yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy105, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy105, 
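/* Pseudo-columns (ROWTS, TBNAME, QSTARTTS/QENDTS, WSTARTTS/WENDTS, WDURATION) and the
 * zero-argument NOW all reduce to ordinary function nodes with a NULL parameter list.
 * The qualified form `tbl.TBNAME` instead passes the table name as a BINARY literal
 * argument to the function node, so later phases can tell whose table name to produce.
 */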
yymsp[-1].minor.yy60)); } - yymsp[-3].minor.yy172 = yylhsminor.yy172; - break; - case 326: /* function_expression ::= CAST NK_LP expression AS type_name NK_RP */ -{ yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy172), yymsp[-1].minor.yy248)); } - yymsp[-5].minor.yy172 = yylhsminor.yy172; - break; - case 328: /* literal_func ::= noarg_func NK_LP NK_RP */ -{ yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy105, NULL)); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; - break; - case 337: /* star_func_para_list ::= NK_STAR */ -{ yylhsminor.yy60 = createNodeList(pCxt, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy60 = yylhsminor.yy60; - break; - case 342: /* star_func_para ::= table_name NK_DOT NK_STAR */ - case 399: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==399); -{ yylhsminor.yy172 = createColumnNode(pCxt, &yymsp[-2].minor.yy105, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; - break; - case 343: /* predicate ::= expression compare_op expression */ - case 348: /* predicate ::= expression in_op in_predicate_value */ yytestcase(yyruleno==348); + yymsp[-2].minor.yy636 = yylhsminor.yy636; + break; + case 315: /* expression_list ::= expression */ +{ yylhsminor.yy236 = createNodeList(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy636)); } + yymsp[0].minor.yy236 = yylhsminor.yy236; + break; + case 316: /* expression_list ::= expression_list NK_COMMA expression */ +{ yylhsminor.yy236 = addNodeToList(pCxt, yymsp[-2].minor.yy236, releaseRawExprNode(pCxt, yymsp[0].minor.yy636)); } + yymsp[-2].minor.yy236 = yylhsminor.yy236; + break; + case 317: /* column_reference ::= column_name */ +{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy53, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy53)); } + yymsp[0].minor.yy636 = yylhsminor.yy636; + break; + case 318: /* column_reference ::= table_name NK_DOT column_name */ +{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy53, createColumnNode(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy53)); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; + break; + case 319: /* pseudo_column ::= ROWTS */ + case 320: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==320); + case 322: /* pseudo_column ::= QSTARTTS */ yytestcase(yyruleno==322); + case 323: /* pseudo_column ::= QENDTS */ yytestcase(yyruleno==323); + case 324: /* pseudo_column ::= WSTARTTS */ yytestcase(yyruleno==324); + case 325: /* pseudo_column ::= WENDTS */ yytestcase(yyruleno==325); + case 326: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==326); + case 332: /* literal_func ::= NOW */ yytestcase(yyruleno==332); +{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL)); } + yymsp[0].minor.yy636 = yylhsminor.yy636; + break; + case 321: /* pseudo_column ::= table_name NK_DOT TBNAME */ +{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-2].minor.yy53)))); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; + break; + case 327: /* function_expression ::= function_name NK_LP expression_list NK_RP */ + case 328: /* function_expression ::= star_func NK_LP star_func_para_list 
NK_RP */ yytestcase(yyruleno==328); +{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy53, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy53, yymsp[-1].minor.yy236)); } + yymsp[-3].minor.yy636 = yylhsminor.yy636; + break; + case 329: /* function_expression ::= CAST NK_LP expression AS type_name NK_RP */ +{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy636), yymsp[-1].minor.yy450)); } + yymsp[-5].minor.yy636 = yylhsminor.yy636; + break; + case 331: /* literal_func ::= noarg_func NK_LP NK_RP */ +{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy53, NULL)); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; + break; + case 340: /* star_func_para_list ::= NK_STAR */ +{ yylhsminor.yy236 = createNodeList(pCxt, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy236 = yylhsminor.yy236; + break; + case 345: /* star_func_para ::= table_name NK_DOT NK_STAR */ + case 402: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==402); +{ yylhsminor.yy636 = createColumnNode(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; + break; + case 346: /* predicate ::= expression compare_op expression */ + case 351: /* predicate ::= expression in_op in_predicate_value */ yytestcase(yyruleno==351); { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy172); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172); - yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, yymsp[-1].minor.yy572, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), releaseRawExprNode(pCxt, yymsp[0].minor.yy172))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); + yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, yymsp[-1].minor.yy136, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 344: /* predicate ::= expression BETWEEN expression AND expression */ + case 347: /* predicate ::= expression BETWEEN expression AND expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy172); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172); - yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &e, createBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-4].minor.yy172), releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), releaseRawExprNode(pCxt, yymsp[0].minor.yy172))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy636); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); + yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-4].minor.yy636), releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); } - yymsp[-4].minor.yy172 = yylhsminor.yy172; + yymsp[-4].minor.yy636 = yylhsminor.yy636; break; - case 345: /* predicate ::= expression NOT BETWEEN expression AND expression */ + case 348: /* predicate ::= expression NOT BETWEEN expression AND expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy172); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172); - 
yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &e, createNotBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy172), releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), releaseRawExprNode(pCxt, yymsp[0].minor.yy172))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy636); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); + yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createNotBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy636), releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); } - yymsp[-5].minor.yy172 = yylhsminor.yy172; + yymsp[-5].minor.yy636 = yylhsminor.yy636; break; - case 346: /* predicate ::= expression IS NULL */ + case 349: /* predicate ::= expression IS NULL */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy172); - yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), NULL)); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636); + yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), NULL)); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 347: /* predicate ::= expression IS NOT NULL */ + case 350: /* predicate ::= expression IS NOT NULL */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy172); - yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy172), NULL)); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy636); + yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy636), NULL)); } - yymsp[-3].minor.yy172 = yylhsminor.yy172; + yymsp[-3].minor.yy636 = yylhsminor.yy636; break; - case 349: /* compare_op ::= NK_LT */ -{ yymsp[0].minor.yy572 = OP_TYPE_LOWER_THAN; } + case 352: /* compare_op ::= NK_LT */ +{ yymsp[0].minor.yy136 = OP_TYPE_LOWER_THAN; } break; - case 350: /* compare_op ::= NK_GT */ -{ yymsp[0].minor.yy572 = OP_TYPE_GREATER_THAN; } + case 353: /* compare_op ::= NK_GT */ +{ yymsp[0].minor.yy136 = OP_TYPE_GREATER_THAN; } break; - case 351: /* compare_op ::= NK_LE */ -{ yymsp[0].minor.yy572 = OP_TYPE_LOWER_EQUAL; } + case 354: /* compare_op ::= NK_LE */ +{ yymsp[0].minor.yy136 = OP_TYPE_LOWER_EQUAL; } break; - case 352: /* compare_op ::= NK_GE */ -{ yymsp[0].minor.yy572 = OP_TYPE_GREATER_EQUAL; } + case 355: /* compare_op ::= NK_GE */ +{ yymsp[0].minor.yy136 = OP_TYPE_GREATER_EQUAL; } break; - case 353: /* compare_op ::= NK_NE */ -{ yymsp[0].minor.yy572 = OP_TYPE_NOT_EQUAL; } + case 356: /* compare_op ::= NK_NE */ +{ yymsp[0].minor.yy136 = OP_TYPE_NOT_EQUAL; } break; - case 354: /* compare_op ::= NK_EQ */ -{ yymsp[0].minor.yy572 = OP_TYPE_EQUAL; } + case 357: /* compare_op ::= NK_EQ */ +{ yymsp[0].minor.yy136 = OP_TYPE_EQUAL; } break; - case 355: /* compare_op ::= LIKE */ -{ yymsp[0].minor.yy572 = OP_TYPE_LIKE; } + case 358: /* compare_op ::= LIKE */ +{ yymsp[0].minor.yy136 = OP_TYPE_LIKE; } break; - case 356: /* compare_op ::= NOT LIKE */ -{ yymsp[-1].minor.yy572 = OP_TYPE_NOT_LIKE; } + case 359: /* compare_op ::= NOT LIKE */ +{ yymsp[-1].minor.yy136 = OP_TYPE_NOT_LIKE; } break; - case 357: /* compare_op ::= MATCH */ -{ 
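/* The compare_op and in_op rules below are pure token-to-enum mappings consumed by the
 * predicate reductions above, roughly:
 *
 *   NK_LT -> OP_TYPE_LOWER_THAN      LIKE      -> OP_TYPE_LIKE
 *   NK_GT -> OP_TYPE_GREATER_THAN    NOT LIKE  -> OP_TYPE_NOT_LIKE
 *   NK_LE -> OP_TYPE_LOWER_EQUAL     MATCH     -> OP_TYPE_MATCH
 *   NK_GE -> OP_TYPE_GREATER_EQUAL   NMATCH    -> OP_TYPE_NMATCH
 *   NK_NE -> OP_TYPE_NOT_EQUAL       CONTAINS  -> OP_TYPE_JSON_CONTAINS
 *   NK_EQ -> OP_TYPE_EQUAL           IN/NOT IN -> OP_TYPE_IN / OP_TYPE_NOT_IN
 */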
yymsp[0].minor.yy572 = OP_TYPE_MATCH; } + case 360: /* compare_op ::= MATCH */ +{ yymsp[0].minor.yy136 = OP_TYPE_MATCH; } break; - case 358: /* compare_op ::= NMATCH */ -{ yymsp[0].minor.yy572 = OP_TYPE_NMATCH; } + case 361: /* compare_op ::= NMATCH */ +{ yymsp[0].minor.yy136 = OP_TYPE_NMATCH; } break; - case 359: /* compare_op ::= CONTAINS */ -{ yymsp[0].minor.yy572 = OP_TYPE_JSON_CONTAINS; } + case 362: /* compare_op ::= CONTAINS */ +{ yymsp[0].minor.yy136 = OP_TYPE_JSON_CONTAINS; } break; - case 360: /* in_op ::= IN */ -{ yymsp[0].minor.yy572 = OP_TYPE_IN; } + case 363: /* in_op ::= IN */ +{ yymsp[0].minor.yy136 = OP_TYPE_IN; } break; - case 361: /* in_op ::= NOT IN */ -{ yymsp[-1].minor.yy572 = OP_TYPE_NOT_IN; } + case 364: /* in_op ::= NOT IN */ +{ yymsp[-1].minor.yy136 = OP_TYPE_NOT_IN; } break; - case 362: /* in_predicate_value ::= NK_LP expression_list NK_RP */ -{ yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createNodeListNode(pCxt, yymsp[-1].minor.yy60)); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + case 365: /* in_predicate_value ::= NK_LP expression_list NK_RP */ +{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createNodeListNode(pCxt, yymsp[-1].minor.yy236)); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 364: /* boolean_value_expression ::= NOT boolean_primary */ + case 367: /* boolean_value_expression ::= NOT boolean_primary */ { - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172); - yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy172), NULL)); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); + yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy636), NULL)); } - yymsp[-1].minor.yy172 = yylhsminor.yy172; + yymsp[-1].minor.yy636 = yylhsminor.yy636; break; - case 365: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ + case 368: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy172); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172); - yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), releaseRawExprNode(pCxt, yymsp[0].minor.yy172))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); + yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 366: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ + case 369: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy172); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy172); - yylhsminor.yy172 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), 
releaseRawExprNode(pCxt, yymsp[0].minor.yy172))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); + yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 373: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */ -{ yylhsminor.yy172 = createJoinTableNode(pCxt, JOIN_TYPE_INNER, yymsp[-2].minor.yy172, yymsp[0].minor.yy172, NULL); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + case 376: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */ +{ yylhsminor.yy636 = createJoinTableNode(pCxt, JOIN_TYPE_INNER, yymsp[-2].minor.yy636, yymsp[0].minor.yy636, NULL); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 376: /* table_primary ::= table_name alias_opt */ -{ yylhsminor.yy172 = createRealTableNode(pCxt, NULL, &yymsp[-1].minor.yy105, &yymsp[0].minor.yy105); } - yymsp[-1].minor.yy172 = yylhsminor.yy172; + case 379: /* table_primary ::= table_name alias_opt */ +{ yylhsminor.yy636 = createRealTableNode(pCxt, NULL, &yymsp[-1].minor.yy53, &yymsp[0].minor.yy53); } + yymsp[-1].minor.yy636 = yylhsminor.yy636; break; - case 377: /* table_primary ::= db_name NK_DOT table_name alias_opt */ -{ yylhsminor.yy172 = createRealTableNode(pCxt, &yymsp[-3].minor.yy105, &yymsp[-1].minor.yy105, &yymsp[0].minor.yy105); } - yymsp[-3].minor.yy172 = yylhsminor.yy172; + case 380: /* table_primary ::= db_name NK_DOT table_name alias_opt */ +{ yylhsminor.yy636 = createRealTableNode(pCxt, &yymsp[-3].minor.yy53, &yymsp[-1].minor.yy53, &yymsp[0].minor.yy53); } + yymsp[-3].minor.yy636 = yylhsminor.yy636; break; - case 378: /* table_primary ::= subquery alias_opt */ -{ yylhsminor.yy172 = createTempTableNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy172), &yymsp[0].minor.yy105); } - yymsp[-1].minor.yy172 = yylhsminor.yy172; + case 381: /* table_primary ::= subquery alias_opt */ +{ yylhsminor.yy636 = createTempTableNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy636), &yymsp[0].minor.yy53); } + yymsp[-1].minor.yy636 = yylhsminor.yy636; break; - case 380: /* alias_opt ::= */ -{ yymsp[1].minor.yy105 = nil_token; } + case 383: /* alias_opt ::= */ +{ yymsp[1].minor.yy53 = nil_token; } break; - case 381: /* alias_opt ::= table_alias */ -{ yylhsminor.yy105 = yymsp[0].minor.yy105; } - yymsp[0].minor.yy105 = yylhsminor.yy105; + case 384: /* alias_opt ::= table_alias */ +{ yylhsminor.yy53 = yymsp[0].minor.yy53; } + yymsp[0].minor.yy53 = yylhsminor.yy53; break; - case 382: /* alias_opt ::= AS table_alias */ -{ yymsp[-1].minor.yy105 = yymsp[0].minor.yy105; } + case 385: /* alias_opt ::= AS table_alias */ +{ yymsp[-1].minor.yy53 = yymsp[0].minor.yy53; } break; - case 383: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */ - case 384: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==384); -{ yymsp[-2].minor.yy172 = yymsp[-1].minor.yy172; } + case 386: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */ + case 387: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==387); +{ yymsp[-2].minor.yy636 = yymsp[-1].minor.yy636; } break; - case 385: /* joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ -{ 
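/* Join construction: a comma-separated FROM list folds into an implicit inner join
 * with no ON condition, createJoinTableNode(pCxt, JOIN_TYPE_INNER, left, right, NULL),
 * while the explicit `join_type JOIN ... ON search_condition` form carries both the
 * join type (defaulting to JOIN_TYPE_INNER when the keyword is omitted) and the
 * condition node.
 */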
yylhsminor.yy172 = createJoinTableNode(pCxt, yymsp[-4].minor.yy636, yymsp[-5].minor.yy172, yymsp[-2].minor.yy172, yymsp[0].minor.yy172); } - yymsp[-5].minor.yy172 = yylhsminor.yy172; + case 388: /* joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ +{ yylhsminor.yy636 = createJoinTableNode(pCxt, yymsp[-4].minor.yy342, yymsp[-5].minor.yy636, yymsp[-2].minor.yy636, yymsp[0].minor.yy636); } + yymsp[-5].minor.yy636 = yylhsminor.yy636; break; - case 386: /* join_type ::= */ -{ yymsp[1].minor.yy636 = JOIN_TYPE_INNER; } + case 389: /* join_type ::= */ +{ yymsp[1].minor.yy342 = JOIN_TYPE_INNER; } break; - case 387: /* join_type ::= INNER */ -{ yymsp[0].minor.yy636 = JOIN_TYPE_INNER; } + case 390: /* join_type ::= INNER */ +{ yymsp[0].minor.yy342 = JOIN_TYPE_INNER; } break; - case 388: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause where_clause_opt partition_by_clause_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ + case 391: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause where_clause_opt partition_by_clause_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ { - yymsp[-8].minor.yy172 = createSelectStmt(pCxt, yymsp[-7].minor.yy617, yymsp[-6].minor.yy60, yymsp[-5].minor.yy172); - yymsp[-8].minor.yy172 = addWhereClause(pCxt, yymsp[-8].minor.yy172, yymsp[-4].minor.yy172); - yymsp[-8].minor.yy172 = addPartitionByClause(pCxt, yymsp[-8].minor.yy172, yymsp[-3].minor.yy60); - yymsp[-8].minor.yy172 = addWindowClauseClause(pCxt, yymsp[-8].minor.yy172, yymsp[-2].minor.yy172); - yymsp[-8].minor.yy172 = addGroupByClause(pCxt, yymsp[-8].minor.yy172, yymsp[-1].minor.yy60); - yymsp[-8].minor.yy172 = addHavingClause(pCxt, yymsp[-8].minor.yy172, yymsp[0].minor.yy172); + yymsp[-8].minor.yy636 = createSelectStmt(pCxt, yymsp[-7].minor.yy603, yymsp[-6].minor.yy236, yymsp[-5].minor.yy636); + yymsp[-8].minor.yy636 = addWhereClause(pCxt, yymsp[-8].minor.yy636, yymsp[-4].minor.yy636); + yymsp[-8].minor.yy636 = addPartitionByClause(pCxt, yymsp[-8].minor.yy636, yymsp[-3].minor.yy236); + yymsp[-8].minor.yy636 = addWindowClauseClause(pCxt, yymsp[-8].minor.yy636, yymsp[-2].minor.yy636); + yymsp[-8].minor.yy636 = addGroupByClause(pCxt, yymsp[-8].minor.yy636, yymsp[-1].minor.yy236); + yymsp[-8].minor.yy636 = addHavingClause(pCxt, yymsp[-8].minor.yy636, yymsp[0].minor.yy636); } break; - case 391: /* set_quantifier_opt ::= ALL */ -{ yymsp[0].minor.yy617 = false; } + case 394: /* set_quantifier_opt ::= ALL */ +{ yymsp[0].minor.yy603 = false; } break; - case 392: /* select_list ::= NK_STAR */ -{ yymsp[0].minor.yy60 = NULL; } + case 395: /* select_list ::= NK_STAR */ +{ yymsp[0].minor.yy236 = NULL; } break; - case 397: /* select_item ::= common_expression column_alias */ -{ yylhsminor.yy172 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy172), &yymsp[0].minor.yy105); } - yymsp[-1].minor.yy172 = yylhsminor.yy172; + case 400: /* select_item ::= common_expression column_alias */ +{ yylhsminor.yy636 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy636), &yymsp[0].minor.yy53); } + yymsp[-1].minor.yy636 = yylhsminor.yy636; break; - case 398: /* select_item ::= common_expression AS column_alias */ -{ yylhsminor.yy172 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), &yymsp[0].minor.yy105); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + case 401: /* select_item ::= common_expression AS column_alias */ +{ yylhsminor.yy636 = setProjectionAlias(pCxt, 
releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), &yymsp[0].minor.yy53); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 403: /* partition_by_clause_opt ::= PARTITION BY expression_list */ - case 420: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==420); - case 432: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==432); -{ yymsp[-2].minor.yy60 = yymsp[0].minor.yy60; } + case 406: /* partition_by_clause_opt ::= PARTITION BY expression_list */ + case 423: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==423); + case 435: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==435); +{ yymsp[-2].minor.yy236 = yymsp[0].minor.yy236; } break; - case 405: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ -{ yymsp[-5].minor.yy172 = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy172), releaseRawExprNode(pCxt, yymsp[-1].minor.yy172)); } + case 408: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ +{ yymsp[-5].minor.yy636 = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy636), releaseRawExprNode(pCxt, yymsp[-1].minor.yy636)); } break; - case 406: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */ -{ yymsp[-3].minor.yy172 = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy172)); } + case 409: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */ +{ yymsp[-3].minor.yy636 = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy636)); } break; - case 407: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ -{ yymsp[-5].minor.yy172 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy172), NULL, yymsp[-1].minor.yy172, yymsp[0].minor.yy172); } + case 410: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ +{ yymsp[-5].minor.yy636 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy636), NULL, yymsp[-1].minor.yy636, yymsp[0].minor.yy636); } break; - case 408: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ -{ yymsp[-7].minor.yy172 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy172), releaseRawExprNode(pCxt, yymsp[-3].minor.yy172), yymsp[-1].minor.yy172, yymsp[0].minor.yy172); } + case 411: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ +{ yymsp[-7].minor.yy636 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy636), releaseRawExprNode(pCxt, yymsp[-3].minor.yy636), yymsp[-1].minor.yy636, yymsp[0].minor.yy636); } break; - case 410: /* sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ -{ yymsp[-3].minor.yy172 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy172); } + case 413: /* sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ +{ yymsp[-3].minor.yy636 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy636); } break; - case 412: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */ -{ yymsp[-3].minor.yy172 = createFillNode(pCxt, yymsp[-1].minor.yy202, NULL); } + case 415: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */ +{ yymsp[-3].minor.yy636 = createFillNode(pCxt, yymsp[-1].minor.yy18, NULL); } break; - case 413: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ -{ 
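/* fill_opt comes in two shapes: FILL(mode) stores just the fill_mode enum (NONE, PREV,
 * NULL, LINEAR, NEXT), whereas FILL(VALUE, ...) is always FILL_MODE_VALUE plus a node
 * list of the per-column constants, e.g. FILL(VALUE, 0, 0.0).
 */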
yymsp[-5].minor.yy172 = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, yymsp[-1].minor.yy60)); } + case 416: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ +{ yymsp[-5].minor.yy636 = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, yymsp[-1].minor.yy236)); } break; - case 414: /* fill_mode ::= NONE */ -{ yymsp[0].minor.yy202 = FILL_MODE_NONE; } + case 417: /* fill_mode ::= NONE */ +{ yymsp[0].minor.yy18 = FILL_MODE_NONE; } break; - case 415: /* fill_mode ::= PREV */ -{ yymsp[0].minor.yy202 = FILL_MODE_PREV; } + case 418: /* fill_mode ::= PREV */ +{ yymsp[0].minor.yy18 = FILL_MODE_PREV; } break; - case 416: /* fill_mode ::= NULL */ -{ yymsp[0].minor.yy202 = FILL_MODE_NULL; } + case 419: /* fill_mode ::= NULL */ +{ yymsp[0].minor.yy18 = FILL_MODE_NULL; } break; - case 417: /* fill_mode ::= LINEAR */ -{ yymsp[0].minor.yy202 = FILL_MODE_LINEAR; } + case 420: /* fill_mode ::= LINEAR */ +{ yymsp[0].minor.yy18 = FILL_MODE_LINEAR; } break; - case 418: /* fill_mode ::= NEXT */ -{ yymsp[0].minor.yy202 = FILL_MODE_NEXT; } + case 421: /* fill_mode ::= NEXT */ +{ yymsp[0].minor.yy18 = FILL_MODE_NEXT; } break; - case 421: /* group_by_list ::= expression */ -{ yylhsminor.yy60 = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy172))); } - yymsp[0].minor.yy60 = yylhsminor.yy60; + case 424: /* group_by_list ::= expression */ +{ yylhsminor.yy236 = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); } + yymsp[0].minor.yy236 = yylhsminor.yy236; break; - case 422: /* group_by_list ::= group_by_list NK_COMMA expression */ -{ yylhsminor.yy60 = addNodeToList(pCxt, yymsp[-2].minor.yy60, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy172))); } - yymsp[-2].minor.yy60 = yylhsminor.yy60; + case 425: /* group_by_list ::= group_by_list NK_COMMA expression */ +{ yylhsminor.yy236 = addNodeToList(pCxt, yymsp[-2].minor.yy236, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); } + yymsp[-2].minor.yy236 = yylhsminor.yy236; break; - case 425: /* query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */ + case 428: /* query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */ { - yylhsminor.yy172 = addOrderByClause(pCxt, yymsp[-3].minor.yy172, yymsp[-2].minor.yy60); - yylhsminor.yy172 = addSlimitClause(pCxt, yylhsminor.yy172, yymsp[-1].minor.yy172); - yylhsminor.yy172 = addLimitClause(pCxt, yylhsminor.yy172, yymsp[0].minor.yy172); + yylhsminor.yy636 = addOrderByClause(pCxt, yymsp[-3].minor.yy636, yymsp[-2].minor.yy236); + yylhsminor.yy636 = addSlimitClause(pCxt, yylhsminor.yy636, yymsp[-1].minor.yy636); + yylhsminor.yy636 = addLimitClause(pCxt, yylhsminor.yy636, yymsp[0].minor.yy636); } - yymsp[-3].minor.yy172 = yylhsminor.yy172; + yymsp[-3].minor.yy636 = yylhsminor.yy636; break; - case 427: /* query_expression_body ::= query_expression_body UNION ALL query_expression_body */ -{ yylhsminor.yy172 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy172, yymsp[0].minor.yy172); } - yymsp[-3].minor.yy172 = yylhsminor.yy172; + case 430: /* query_expression_body ::= query_expression_body UNION ALL query_expression_body */ +{ yylhsminor.yy636 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy636, yymsp[0].minor.yy636); } + yymsp[-3].minor.yy636 = yylhsminor.yy636; break; - case 428: /* query_expression_body ::= query_expression_body UNION 
query_expression_body */ -{ yylhsminor.yy172 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy172, yymsp[0].minor.yy172); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + case 431: /* query_expression_body ::= query_expression_body UNION query_expression_body */ +{ yylhsminor.yy636 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy636, yymsp[0].minor.yy636); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 430: /* query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */ -{ yymsp[-5].minor.yy172 = yymsp[-4].minor.yy172; } - yy_destructor(yypParser,350,&yymsp[-3].minor); - yy_destructor(yypParser,351,&yymsp[-2].minor); - yy_destructor(yypParser,352,&yymsp[-1].minor); + case 433: /* query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */ +{ yymsp[-5].minor.yy636 = yymsp[-4].minor.yy636; } + yy_destructor(yypParser,353,&yymsp[-3].minor); + yy_destructor(yypParser,354,&yymsp[-2].minor); + yy_destructor(yypParser,355,&yymsp[-1].minor); break; - case 434: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */ - case 438: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==438); -{ yymsp[-1].minor.yy172 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); } + case 437: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */ + case 441: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==441); +{ yymsp[-1].minor.yy636 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); } break; - case 435: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ - case 439: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==439); -{ yymsp[-3].minor.yy172 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } + case 438: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ + case 442: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==442); +{ yymsp[-3].minor.yy636 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } break; - case 436: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ - case 440: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==440); -{ yymsp[-3].minor.yy172 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); } + case 439: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ + case 443: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==443); +{ yymsp[-3].minor.yy636 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); } break; - case 441: /* subquery ::= NK_LP query_expression NK_RP */ -{ yylhsminor.yy172 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy172); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + case 444: /* subquery ::= NK_LP query_expression NK_RP */ +{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy636); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 445: /* sort_specification ::= expression ordering_specification_opt null_ordering_opt */ -{ yylhsminor.yy172 = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy172), yymsp[-1].minor.yy14, yymsp[0].minor.yy17); } - yymsp[-2].minor.yy172 = yylhsminor.yy172; + case 448: /* sort_specification ::= expression ordering_specification_opt null_ordering_opt */ +{ yylhsminor.yy636 = createOrderByExprNode(pCxt, 
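/* SLIMIT/LIMIT share one shape: `LIMIT n` sets only the limit, `LIMIT n OFFSET m`
 * passes (n, m), and the comma form `LIMIT m, n` swaps the operands so the second
 * integer is the limit (the MySQL-style convention). Also worth noting below: an
 * omitted ASC/DESC defaults to ORDER_ASC, and an omitted NULLS clause becomes
 * NULL_ORDER_DEFAULT rather than a hard-coded FIRST or LAST.
 */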
releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), yymsp[-1].minor.yy430, yymsp[0].minor.yy185); } + yymsp[-2].minor.yy636 = yylhsminor.yy636; break; - case 446: /* ordering_specification_opt ::= */ -{ yymsp[1].minor.yy14 = ORDER_ASC; } + case 449: /* ordering_specification_opt ::= */ +{ yymsp[1].minor.yy430 = ORDER_ASC; } break; - case 447: /* ordering_specification_opt ::= ASC */ -{ yymsp[0].minor.yy14 = ORDER_ASC; } + case 450: /* ordering_specification_opt ::= ASC */ +{ yymsp[0].minor.yy430 = ORDER_ASC; } break; - case 448: /* ordering_specification_opt ::= DESC */ -{ yymsp[0].minor.yy14 = ORDER_DESC; } + case 451: /* ordering_specification_opt ::= DESC */ +{ yymsp[0].minor.yy430 = ORDER_DESC; } break; - case 449: /* null_ordering_opt ::= */ -{ yymsp[1].minor.yy17 = NULL_ORDER_DEFAULT; } + case 452: /* null_ordering_opt ::= */ +{ yymsp[1].minor.yy185 = NULL_ORDER_DEFAULT; } break; - case 450: /* null_ordering_opt ::= NULLS FIRST */ -{ yymsp[-1].minor.yy17 = NULL_ORDER_FIRST; } + case 453: /* null_ordering_opt ::= NULLS FIRST */ +{ yymsp[-1].minor.yy185 = NULL_ORDER_FIRST; } break; - case 451: /* null_ordering_opt ::= NULLS LAST */ -{ yymsp[-1].minor.yy17 = NULL_ORDER_LAST; } + case 454: /* null_ordering_opt ::= NULLS LAST */ +{ yymsp[-1].minor.yy185 = NULL_ORDER_LAST; } break; default: break; diff --git a/source/libs/parser/test/mockCatalog.cpp b/source/libs/parser/test/mockCatalog.cpp index 7297e4e93aac9ffbfb67da92c886fe72a212185b..154f13ea686aa172d9c2ad53bfadcae893305ed0 100644 --- a/source/libs/parser/test/mockCatalog.cpp +++ b/source/libs/parser/test/mockCatalog.cpp @@ -71,11 +71,6 @@ void generateInformationSchema(MockCatalogService* mcs) { .addColumn("stable_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN); builder.done(); } - { - ITableBuilder& builder = mcs->createTableBuilder("information_schema", "user_streams", TSDB_SYSTEM_TABLE, 1) - .addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN); - builder.done(); - } { ITableBuilder& builder = mcs->createTableBuilder("information_schema", "user_tables", TSDB_SYSTEM_TABLE, 2) .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN) @@ -106,6 +101,11 @@ void generatePerformanceSchema(MockCatalogService* mcs) { .addColumn("id", TSDB_DATA_TYPE_INT); builder.done(); } + { + ITableBuilder& builder = mcs->createTableBuilder("performance_schema", "streams", TSDB_SYSTEM_TABLE, 1) + .addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN); + builder.done(); + } } /* @@ -154,6 +154,13 @@ void generateTestST1(MockCatalogService* mcs) { builder.done(); mcs->createSubTable("test", "st1", "st1s1", 1); mcs->createSubTable("test", "st1", "st1s2", 2); + mcs->createSubTable("test", "st1", "st1s3", 1); +} + +void generateFunctions(MockCatalogService* mcs) { + mcs->createFunction("udf1", TSDB_FUNC_TYPE_SCALAR, TSDB_DATA_TYPE_INT, tDataTypes[TSDB_DATA_TYPE_INT].bytes, 0); + mcs->createFunction("udf2", TSDB_FUNC_TYPE_AGGREGATE, TSDB_DATA_TYPE_DOUBLE, tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, + 8); } } // namespace @@ -162,17 +169,17 @@ int32_t __catalogGetHandle(const char* clusterId, struct SCatalog** catalogHandl int32_t __catalogGetTableMeta(struct SCatalog* pCatalog, void* pRpc, const SEpSet* pMgmtEps, const SName* pTableName, STableMeta** pTableMeta) { - return mockCatalogService->catalogGetTableMeta(pTableName, pTableMeta); + return g_mockCatalogService->catalogGetTableMeta(pTableName, pTableMeta); } int32_t __catalogGetTableHashVgroup(struct SCatalog* pCatalog, void* pRpc, const SEpSet* pMgmtEps, const SName* 
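/* The parser unit tests below never talk to a real server: a Stub rewires the catalog
 * entry points (catalogGetTableMeta, catalogGetUdfInfo, ...) to the in-memory
 * g_mockCatalogService (the g_ prefix marking it as a global is new in this change).
 * generateFunctions() pre-registers the two UDFs the tests resolve, e.g.:
 *
 *   g_mockCatalogService->createFunction("udf1", TSDB_FUNC_TYPE_SCALAR,
 *                                        TSDB_DATA_TYPE_INT,
 *                                        tDataTypes[TSDB_DATA_TYPE_INT].bytes, 0);
 */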
pTableName, SVgroupInfo* vgInfo) {
-  return mockCatalogService->catalogGetTableHashVgroup(pTableName, vgInfo);
+  return g_mockCatalogService->catalogGetTableHashVgroup(pTableName, vgInfo);
 }
 
 int32_t __catalogGetTableDistVgInfo(SCatalog* pCtg, void* pRpc, const SEpSet* pMgmtEps, const SName* pTableName,
                                     SArray** pVgList) {
-  return mockCatalogService->catalogGetTableDistVgInfo(pTableName, pVgList);
+  return g_mockCatalogService->catalogGetTableDistVgInfo(pTableName, pVgList);
 }
 
 int32_t __catalogGetDBVgVersion(SCatalog* pCtg, const char* dbFName, int32_t* version, int64_t* dbId,
@@ -195,8 +202,13 @@ int32_t __catalogChkAuth(SCatalog* pCtg, void* pRpc, const SEpSet* pMgmtEps, con
   return 0;
 }
 
+int32_t __catalogGetUdfInfo(SCatalog* pCtg, void* pTrans, const SEpSet* pMgmtEps, const char* funcName,
+                            SFuncInfo* pInfo) {
+  return g_mockCatalogService->catalogGetUdfInfo(funcName, pInfo);
+}
+
 void initMetaDataEnv() {
-  mockCatalogService.reset(new MockCatalogService());
+  g_mockCatalogService.reset(new MockCatalogService());
 
   static Stub stub;
   stub.set(catalogGetHandle, __catalogGetHandle);
@@ -208,6 +220,7 @@ void initMetaDataEnv() {
   stub.set(catalogGetDBVgInfo, __catalogGetDBVgInfo);
   stub.set(catalogGetDBCfg, __catalogGetDBCfg);
   stub.set(catalogChkAuth, __catalogChkAuth);
+  stub.set(catalogGetUdfInfo, __catalogGetUdfInfo);
   // {
   //   AddrAny any("libcatalog.so");
   //   std::map<std::string, void*> result;
@@ -251,11 +264,12 @@ }
 
 void generateMetaData() {
-  generateInformationSchema(mockCatalogService.get());
-  generatePerformanceSchema(mockCatalogService.get());
-  generateTestT1(mockCatalogService.get());
-  generateTestST1(mockCatalogService.get());
-  mockCatalogService->showTables();
+  generateInformationSchema(g_mockCatalogService.get());
+  generatePerformanceSchema(g_mockCatalogService.get());
+  generateTestT1(g_mockCatalogService.get());
+  generateTestST1(g_mockCatalogService.get());
+  generateFunctions(g_mockCatalogService.get());
+  g_mockCatalogService->showTables();
 }
 
-void destroyMetaDataEnv() { mockCatalogService.reset(); }
+void destroyMetaDataEnv() { g_mockCatalogService.reset(); }
diff --git a/source/libs/parser/test/mockCatalogService.cpp b/source/libs/parser/test/mockCatalogService.cpp
index f86cecb9e3399bf6b5b55c59adcc6b99e1950468..566c4d8b04c0127e04ef9ce0fb0b5eabae7d25da 100644
--- a/source/libs/parser/test/mockCatalogService.cpp
+++ b/source/libs/parser/test/mockCatalogService.cpp
@@ -23,7 +23,7 @@
 #include "tname.h"
 #include "ttypes.h"
 
-std::unique_ptr<MockCatalogService> mockCatalogService;
+std::unique_ptr<MockCatalogService> g_mockCatalogService;
 
 class TableBuilder : public ITableBuilder {
  public:
@@ -120,6 +120,38 @@ class MockCatalogServiceImpl {
     return copyTableVgroup(db, tNameGetTableName(pTableName), vgList);
   }
 
+  int32_t catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const {
+    auto it = udf_.find(funcName);
+    if (udf_.end() == it) {
+      return TSDB_CODE_FAILED;
+    }
+    memcpy(pInfo, it->second.get(), sizeof(SFuncInfo));
+    return TSDB_CODE_SUCCESS;
+  }
+
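/* catalogGetAllMeta resolves every category of metadata in the request in one pass,
 * chaining `if (TSDB_CODE_SUCCESS == code)` guards so the first failure short-circuits
 * the remaining lookups and its error code is what the caller sees; presumably this
 * mirrors the shape of the batch catalog API the production planner goes through.
 */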
+  int32_t catalogGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) const {
+    int32_t code = getAllTableMeta(pCatalogReq->pTableMeta, &pMetaData->pTableMeta);
+    if (TSDB_CODE_SUCCESS == code) {
+      code = getAllTableVgroup(pCatalogReq->pTableHash, &pMetaData->pTableHash);
+    }
+    if (TSDB_CODE_SUCCESS == code) {
+      code = getAllDbVgroup(pCatalogReq->pDbVgroup, &pMetaData->pDbVgroup);
+    }
+    if (TSDB_CODE_SUCCESS == code) {
+      code = getAllDbCfg(pCatalogReq->pDbCfg, &pMetaData->pDbCfg);
+    }
+    if (TSDB_CODE_SUCCESS == code) {
+      code = getAllDbInfo(pCatalogReq->pDbInfo, &pMetaData->pDbInfo);
+    }
+    if (TSDB_CODE_SUCCESS == code) {
+      code = getAllUserAuth(pCatalogReq->pUser, &pMetaData->pUser);
+    }
+    if (TSDB_CODE_SUCCESS == code) {
+      code = getAllUdf(pCatalogReq->pUdf, &pMetaData->pUdfList);
+    }
+    return code;
+  }
+
   TableBuilder& createTableBuilder(const std::string& db, const std::string& tbname, int8_t tableType,
                                    int32_t numOfColumns, int32_t numOfTags) {
     builder_ = TableBuilder::createTableBuilder(tableType, numOfColumns, numOfTags);
@@ -155,9 +187,8 @@ class MockCatalogServiceImpl {
 // number of backward fills
 #define NOB(n) ((n) % 2 ? (n) / 2 + 1 : (n) / 2)
 // center aligned
-#define CA(n, s) \
-  std::setw(NOF((n) - (s).length())) << "" << (s) << std::setw(NOB((n) - (s).length())) << "" \
-  << "|"
+#define CA(n, s) std::setw(NOF((n) - int((s).length()))) << "" << (s) \
+    << std::setw(NOB((n) - int((s).length()))) << "" << "|"
 // string field length
 #define SFL 20
 // string field header
@@ -203,21 +234,23 @@ class MockCatalogServiceImpl {
     }
   }
 
-  std::shared_ptr<STableMeta> getTableMeta(const std::string& db, const std::string& tbname) const {
-    DbMetaCache::const_iterator it = meta_.find(db);
-    if (meta_.end() == it) {
-      return std::shared_ptr<STableMeta>();
-    }
-    TableMetaCache::const_iterator tit = it->second.find(tbname);
-    if (it->second.end() == tit) {
-      return std::shared_ptr<STableMeta>();
-    }
-    return tit->second;
+  void createFunction(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen, int32_t bufSize) {
+    std::shared_ptr<SFuncInfo> info(new SFuncInfo);
+    strcpy(info->name, func.c_str());
+    info->funcType = funcType;
+    info->scriptType = TSDB_FUNC_SCRIPT_BIN_LIB;
+    info->outputType = outputType;
+    info->outputLen = outputLen;
+    info->bufSize = bufSize;
+    info->pCode = nullptr;
+    info->pComment = nullptr;
+    udf_.insert(std::make_pair(func, info));
   }
 
  private:
  typedef std::map<std::string, std::shared_ptr<STableMeta>> TableMetaCache;
  typedef std::map<std::string, TableMetaCache> DbMetaCache;
+ typedef std::map<std::string, std::shared_ptr<SFuncInfo>> UdfMetaCache;
 
  std::string toDbname(const std::string& dbFullName) const {
    std::string::size_type n = dbFullName.find(".");
@@ -300,9 +333,128 @@ class MockCatalogServiceImpl {
     return TSDB_CODE_SUCCESS;
   }
 
+  std::shared_ptr<STableMeta> getTableMeta(const std::string& db, const std::string& tbname) const {
+    DbMetaCache::const_iterator it = meta_.find(db);
+    if (meta_.end() == it) {
+      return std::shared_ptr<STableMeta>();
+    }
+    TableMetaCache::const_iterator tit = it->second.find(tbname);
+    if (it->second.end() == tit) {
+      return std::shared_ptr<STableMeta>();
+    }
+    return tit->second;
+  }
+
+  int32_t getAllTableMeta(SArray* pTableMetaReq, SArray** pTableMetaData) const {
+    int32_t code = TSDB_CODE_SUCCESS;
+    if (NULL != pTableMetaReq) {
+      int32_t ntables = taosArrayGetSize(pTableMetaReq);
+      *pTableMetaData = taosArrayInit(ntables, POINTER_BYTES);
+      for (int32_t i = 0; i < ntables; ++i) {
+        STableMeta* pMeta = NULL;
+        code = catalogGetTableMeta((const SName*)taosArrayGet(pTableMetaReq, i), &pMeta);
+        if (TSDB_CODE_SUCCESS == code) {
+          taosArrayPush(*pTableMetaData, &pMeta);
+        } else {
+          break;
+        }
+      }
+    }
+    return code;
+  }
+
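/* Each getAll* helper below follows the same template: size a taosArray to the request,
 * resolve one entry per requested name, and break on the first failure so `code`
 * propagates. The db-vgroup/db-cfg/db-info variants push zero-initialized placeholders
 * instead of doing real lookups, apparently because no current parser test depends on
 * their contents.
 */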
+  int32_t getAllTableVgroup(SArray* pTableVgroupReq, SArray** pTableVgroupData) const {
+    int32_t code = TSDB_CODE_SUCCESS;
+    if (NULL != pTableVgroupReq) {
+      int32_t ntables = taosArrayGetSize(pTableVgroupReq);
+      *pTableVgroupData = taosArrayInit(ntables, sizeof(SVgroupInfo));
+      for (int32_t i = 0; i < ntables; ++i) {
+        SVgroupInfo vgInfo = {0};
+        code = catalogGetTableHashVgroup((const SName*)taosArrayGet(pTableVgroupReq, i), &vgInfo);
+        if (TSDB_CODE_SUCCESS == code) {
+          taosArrayPush(*pTableVgroupData, &vgInfo);
+        } else {
+          break;
+        }
+      }
+    }
+    return code;
+  }
+
+  int32_t getAllDbVgroup(SArray* pDbVgroupReq, SArray** pDbVgroupData) const {
+    int32_t code = TSDB_CODE_SUCCESS;
+    if (NULL != pDbVgroupReq) {
+      int32_t ndbs = taosArrayGetSize(pDbVgroupReq);
+      *pDbVgroupData = taosArrayInit(ndbs, POINTER_BYTES);
+      for (int32_t i = 0; i < ndbs; ++i) {
+        int64_t zeroVg = 0;
+        taosArrayPush(*pDbVgroupData, &zeroVg);
+      }
+    }
+    return code;
+  }
+
+  int32_t getAllDbCfg(SArray* pDbCfgReq, SArray** pDbCfgData) const {
+    int32_t code = TSDB_CODE_SUCCESS;
+    if (NULL != pDbCfgReq) {
+      int32_t ndbs = taosArrayGetSize(pDbCfgReq);
+      *pDbCfgData = taosArrayInit(ndbs, sizeof(SDbCfgInfo));
+      for (int32_t i = 0; i < ndbs; ++i) {
+        SDbCfgInfo dbCfg = {0};
+        taosArrayPush(*pDbCfgData, &dbCfg);
+      }
+    }
+    return code;
+  }
+
+  int32_t getAllDbInfo(SArray* pDbInfoReq, SArray** pDbInfoData) const {
+    int32_t code = TSDB_CODE_SUCCESS;
+    if (NULL != pDbInfoReq) {
+      int32_t ndbs = taosArrayGetSize(pDbInfoReq);
+      *pDbInfoData = taosArrayInit(ndbs, sizeof(SDbCfgInfo));
+      for (int32_t i = 0; i < ndbs; ++i) {
+        SDbInfo dbInfo = {0};
+        taosArrayPush(*pDbInfoData, &dbInfo);
+      }
+    }
+    return code;
+  }
+
+  int32_t getAllUserAuth(SArray* pUserAuthReq, SArray** pUserAuthData) const {
+    int32_t code = TSDB_CODE_SUCCESS;
+    if (NULL != pUserAuthReq) {
+      int32_t num = taosArrayGetSize(pUserAuthReq);
+      *pUserAuthData = taosArrayInit(num, sizeof(bool));
+      for (int32_t i = 0; i < num; ++i) {
+        bool pass = true;
+        taosArrayPush(*pUserAuthData, &pass);
+      }
+    }
+    return code;
+  }
+
+  int32_t getAllUdf(SArray* pUdfReq, SArray** pUdfData) const {
+    int32_t code = TSDB_CODE_SUCCESS;
+    if (NULL != pUdfReq) {
+      int32_t num = taosArrayGetSize(pUdfReq);
+      *pUdfData = taosArrayInit(num, sizeof(SFuncInfo));
+      for (int32_t i = 0; i < num; ++i) {
+        SFuncInfo info = {0};
+        code = catalogGetUdfInfo((char*)taosArrayGet(pUdfReq, i), &info);
+        if (TSDB_CODE_SUCCESS == code) {
+          taosArrayPush(*pUdfData, &info);
+        } else {
+          break;
+        }
+      }
+    }
+    return code;
+  }
+
   uint64_t id_;
   std::unique_ptr<TableBuilder> builder_;
   DbMetaCache meta_;
+  UdfMetaCache udf_;
 };
 
 MockCatalogService::MockCatalogService() : impl_(new MockCatalogServiceImpl()) {}
@@ -321,9 +473,9 @@ void MockCatalogService::createSubTable(const std::string& db, const std::string
 
 void MockCatalogService::showTables() const { impl_->showTables(); }
 
-std::shared_ptr<STableMeta> MockCatalogService::getTableMeta(const std::string& db,
-                                                             const std::string& tbname) const {
-  return impl_->getTableMeta(db, tbname);
+void MockCatalogService::createFunction(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen,
+                                        int32_t bufSize) {
+  impl_->createFunction(func, funcType, outputType, outputLen, bufSize);
 }
 
 int32_t MockCatalogService::catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const {
@@ -337,3 +489,11 @@ int32_t MockCatalogService::catalogGetTableHashVgroup(const SName* pTableName, S
 int32_t MockCatalogService::catalogGetTableDistVgInfo(const SName* pTableName, SArray** pVgList) const {
   return impl_->catalogGetTableDistVgInfo(pTableName, pVgList);
 }
+
+int32_t MockCatalogService::catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const {
+  return impl_->catalogGetUdfInfo(funcName, pInfo);
+}
+
+int32_t MockCatalogService::catalogGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) const {
+  return impl_->catalogGetAllMeta(pCatalogReq, pMetaData);
+}
diff --git a/source/libs/parser/test/mockCatalogService.h b/source/libs/parser/test/mockCatalogService.h
index edfc40dbc2114611707276d34bbc491714152b26..cb0f10e95bfcb05ce46ea0eb423d9753477db422 100644
--- a/source/libs/parser/test/mockCatalogService.h
+++ b/source/libs/parser/test/mockCatalogService.h
@@ -56,16 +56,18 @@ class MockCatalogService {
                                    int32_t numOfColumns, int32_t numOfTags = 0);
   void createSubTable(const std::string& db, const std::string& stbname, const std::string& tbname, int16_t vgid);
   void showTables() const;
-  std::shared_ptr<STableMeta> getTableMeta(const std::string& db, const std::string& tbname) const;
+  void createFunction(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen, int32_t bufSize);
 
   int32_t catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const;
   int32_t catalogGetTableHashVgroup(const SName* pTableName, SVgroupInfo* vgInfo) const;
   int32_t catalogGetTableDistVgInfo(const SName* pTableName, SArray** pVgList) const;
+  int32_t catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const;
+  int32_t catalogGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) const;
 
  private:
  std::unique_ptr<MockCatalogServiceImpl> impl_;
 };
 
-extern std::unique_ptr<MockCatalogService> mockCatalogService;
+extern std::unique_ptr<MockCatalogService> g_mockCatalogService;
 
 #endif  // MOCK_CATALOG_SERVICE_H
diff --git a/source/libs/parser/test/parInitialATest.cpp b/source/libs/parser/test/parInitialATest.cpp
index cc0dded5701bbe72c062aa69d339454976e6d1ac..784586dfb2258eab827fb4db40eb4fe9ee70bde9 100644
--- a/source/libs/parser/test/parInitialATest.cpp
+++ b/source/libs/parser/test/parInitialATest.cpp
@@ -204,7 +204,7 @@ TEST_F(ParserInitialATest, alterTable) {
     }
   };
 
-  auto setAlterTagFunc = [&](const char* pTbname, const char* pTagName, const uint8_t* pNewVal, uint32_t bytes) {
+  auto setAlterTagFunc = [&](const char* pTbname, const char* pTagName, uint8_t* pNewVal, uint32_t bytes) {
     memset(&expect, 0, sizeof(SVAlterTbReq));
     expect.tbName = strdup(pTbname);
     expect.action = TSDB_ALTER_TABLE_UPDATE_TAG_VAL;
@@ -215,7 +215,7 @@
     expect.pTagVal = pNewVal;
   };
 
-  auto setAlterOptionsFunc = [&](const char* pTbname, int32_t ttl, const char* pComment = nullptr) {
+  auto setAlterOptionsFunc = [&](const char* pTbname, int32_t ttl, char* pComment = nullptr) {
     memset(&expect, 0, sizeof(SVAlterTbReq));
     expect.tbName = strdup(pTbname);
     expect.action = TSDB_ALTER_TABLE_UPDATE_OPTIONS;
@@ -240,7 +240,7 @@
     void* pBuf = POINTER_SHIFT(pVgData->pData, sizeof(SMsgHead));
     SVAlterTbReq req = {0};
     SDecoder coder = {0};
-    tDecoderInit(&coder, (const uint8_t*)pBuf, pVgData->size);
+    tDecoderInit(&coder, (uint8_t*)pBuf, pVgData->size);
     ASSERT_EQ(tDecodeSVAlterTbReq(&coder, &req), TSDB_CODE_SUCCESS);
 
     ASSERT_EQ(std::string(req.tbName), std::string(expect.tbName));
@@ -274,7 +274,7 @@
   setAlterOptionsFunc("t1", 10, nullptr);
   run("ALTER TABLE t1 TTL 10");
 
-  setAlterOptionsFunc("t1", -1, "test");
+  setAlterOptionsFunc("t1", -1, (char*)"test");
   run("ALTER TABLE t1 COMMENT 'test'");
 
   setAlterColFunc("t1", TSDB_ALTER_TABLE_ADD_COLUMN, "cc1", TSDB_DATA_TYPE_BIGINT);
@@ -290,7 +290,7 @@
   run("ALTER TABLE t1 RENAME COLUMN c1 cc1");
 
   int32_t val = 10;
-  setAlterTagFunc("st1s1", "tag1", (const uint8_t*)&val, sizeof(val));
+  setAlterTagFunc("st1s1", "tag1", (uint8_t*)&val, sizeof(val));
   run("ALTER TABLE st1s1 SET TAG tag1=10");
 
   // todo
diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp
index 
abcb6bca8bc96f99b2fec79d2813e01524edbf6a..e55f36376cbce26f1954211fe7308070a0a192bd 100644 --- a/source/libs/parser/test/parInitialCTest.cpp +++ b/source/libs/parser/test/parInitialCTest.cpp @@ -90,6 +90,7 @@ TEST_F(ParserInitialCTest, createDatabase) { expect.walLevel = TSDB_DEFAULT_WAL_LEVEL; expect.numOfVgroups = TSDB_DEFAULT_VN_PER_DB; expect.numOfStables = TSDB_DEFAULT_DB_SINGLE_STABLE; + expect.schemaless = TSDB_DEFAULT_DB_SCHEMALESS; }; auto setDbBufferFunc = [&](int32_t buffer) { expect.buffer = buffer; }; @@ -124,6 +125,7 @@ TEST_F(ParserInitialCTest, createDatabase) { taosArrayPush(expect.pRetensions, &retention); ++expect.numOfRetensions; }; + auto setDbSchemalessFunc = [&](int8_t schemaless) { expect.schemaless = schemaless; }; setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) { ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_CREATE_DATABASE_STMT); @@ -149,6 +151,7 @@ TEST_F(ParserInitialCTest, createDatabase) { ASSERT_EQ(req.replications, expect.replications); ASSERT_EQ(req.strict, expect.strict); ASSERT_EQ(req.cacheLastRow, expect.cacheLastRow); + ASSERT_EQ(req.schemaless, expect.schemaless); ASSERT_EQ(req.ignoreExist, expect.ignoreExist); ASSERT_EQ(req.numOfRetensions, expect.numOfRetensions); if (expect.numOfRetensions > 0) { @@ -188,6 +191,7 @@ TEST_F(ParserInitialCTest, createDatabase) { setDbWalLevelFunc(2); setDbVgroupsFunc(100); setDbSingleStableFunc(1); + setDbSchemalessFunc(1); run("CREATE DATABASE IF NOT EXISTS wxy_db " "BUFFER 64 " "CACHELAST 2 " @@ -205,7 +209,8 @@ TEST_F(ParserInitialCTest, createDatabase) { "STRICT 1 " "WAL 2 " "VGROUPS 100 " - "SINGLE_STABLE 1 "); + "SINGLE_STABLE 1 " + "SCHEMALESS 1"); setCreateDbReqFunc("wxy_db", 1); setDbDaysFunc(100); @@ -223,7 +228,44 @@ TEST_F(ParserInitialCTest, createDnode) { run("CREATE DNODE 1.1.1.1 PORT 9000"); } -// todo CREATE FUNCTION +// CREATE [AGGREGATE] FUNCTION [IF NOT EXISTS] func_name AS library_path OUTPUTTYPE type_name [BUFSIZE value] +TEST_F(ParserInitialCTest, createFunction) { + useDb("root", "test"); + + SCreateFuncReq expect = {0}; + + auto setCreateFuncReqFunc = [&](const char* pUdfName, int8_t outputType, int32_t outputBytes = 0, + int8_t funcType = TSDB_FUNC_TYPE_SCALAR, int8_t igExists = 0, int32_t bufSize = 0) { + memset(&expect, 0, sizeof(SCreateFuncReq)); + strcpy(expect.name, pUdfName); + expect.igExists = igExists; + expect.funcType = funcType; + expect.scriptType = TSDB_FUNC_SCRIPT_BIN_LIB; + expect.outputType = outputType; + expect.outputLen = outputBytes > 0 ? 
outputBytes : tDataTypes[outputType].bytes; + expect.bufSize = bufSize; + }; + + setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) { + ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_CREATE_FUNCTION_STMT); + SCreateFuncReq req = {0}; + ASSERT_TRUE(TSDB_CODE_SUCCESS == tDeserializeSCreateFuncReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req)); + + ASSERT_EQ(std::string(req.name), std::string(expect.name)); + ASSERT_EQ(req.igExists, expect.igExists); + ASSERT_EQ(req.funcType, expect.funcType); + ASSERT_EQ(req.scriptType, expect.scriptType); + ASSERT_EQ(req.outputType, expect.outputType); + ASSERT_EQ(req.outputLen, expect.outputLen); + ASSERT_EQ(req.bufSize, expect.bufSize); + }); + + setCreateFuncReqFunc("udf1", TSDB_DATA_TYPE_INT); + // run("CREATE FUNCTION udf1 AS './build/lib/libudf1.so' OUTPUTTYPE INT"); + + setCreateFuncReqFunc("udf2", TSDB_DATA_TYPE_DOUBLE, 0, TSDB_FUNC_TYPE_AGGREGATE, 1, 8); + // run("CREATE AGGREGATE FUNCTION IF NOT EXISTS udf2 AS './build/lib/libudf2.so' OUTPUTTYPE DOUBLE BUFSIZE 8"); +} TEST_F(ParserInitialCTest, createIndexSma) { useDb("root", "test"); diff --git a/source/libs/parser/test/parInitialDTest.cpp b/source/libs/parser/test/parInitialDTest.cpp index 1153b238b1feb1be8167c91acb0bf7f7267a391f..57d349e7eeecd33fd9855f5a0d8df22548c5ceee 100644 --- a/source/libs/parser/test/parInitialDTest.cpp +++ b/source/libs/parser/test/parInitialDTest.cpp @@ -19,7 +19,7 @@ using namespace std; namespace ParserTest { -class ParserInitialDTest : public ParserTestBase {}; +class ParserInitialDTest : public ParserDdlTest {}; // todo delete // todo desc @@ -29,7 +29,37 @@ class ParserInitialDTest : public ParserTestBase {}; TEST_F(ParserInitialDTest, dropBnode) { useDb("root", "test"); - run("drop bnode on dnode 1"); + run("DROP BNODE ON DNODE 1"); +} + +// DROP CGROUP [ IF EXISTS ] cgroup_name ON topic_name +TEST_F(ParserInitialDTest, dropCGroup) { + useDb("root", "test"); + + SMDropCgroupReq expect = {0}; + + auto setDropCgroupReqFunc = [&](const char* pTopicName, const char* pCGroupName, int8_t igNotExists = 0) { + memset(&expect, 0, sizeof(SMDropCgroupReq)); + snprintf(expect.topic, sizeof(expect.topic), "0.%s", pTopicName); + strcpy(expect.cgroup, pCGroupName); + expect.igNotExists = igNotExists; + }; + + setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) { + ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_DROP_CGROUP_STMT); + SMDropCgroupReq req = {0}; + ASSERT_TRUE(TSDB_CODE_SUCCESS == tDeserializeSMDropCgroupReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req)); + + ASSERT_EQ(std::string(req.topic), std::string(expect.topic)); + ASSERT_EQ(std::string(req.cgroup), std::string(expect.cgroup)); + ASSERT_EQ(req.igNotExists, expect.igNotExists); + }); + + setDropCgroupReqFunc("tp1", "cg1"); + run("DROP CGROUP cg1 ON tp1"); + + setDropCgroupReqFunc("tp1", "cg1", 1); + run("DROP CGROUP IF EXISTS cg1 ON tp1"); } // todo drop database @@ -73,6 +103,7 @@ TEST_F(ParserInitialDTest, dropTopic) { } TEST_F(ParserInitialDTest, dropUser) { + login("root"); useDb("root", "test"); run("drop user wxy"); diff --git a/source/libs/parser/test/parInsertTest.cpp b/source/libs/parser/test/parInsertTest.cpp index 7fafec88824111ef8b170ba25f3b092fd7ba1f1a..4d313fca766e8ab8f8d6ba404f7faf2fe833e9e6 100644 --- a/source/libs/parser/test/parInsertTest.cpp +++ b/source/libs/parser/test/parInsertTest.cpp @@ -15,6 +15,7 @@ #include +#include "mockCatalogService.h" #include "os.h" #include "parInt.h" @@ -57,6 +58,38 @@ class InsertTest : public Test { return code_; } + int32_t 
runAsync() { + code_ = parseInsertSyntax(&cxt_, &res_); + if (code_ != TSDB_CODE_SUCCESS) { + cout << "parseInsertSyntax code:" << toString(code_) << ", msg:" << errMagBuf_ << endl; + return code_; + } + + SCatalogReq catalogReq = {0}; + code_ = buildCatalogReq(res_->pMetaCache, &catalogReq); + if (code_ != TSDB_CODE_SUCCESS) { + cout << "buildCatalogReq code:" << toString(code_) << ", msg:" << errMagBuf_ << endl; + return code_; + } + + SMetaData metaData = {0}; + g_mockCatalogService->catalogGetAllMeta(&catalogReq, &metaData); + + code_ = putMetaDataToCache(&catalogReq, &metaData, res_->pMetaCache); + if (code_ != TSDB_CODE_SUCCESS) { + cout << "putMetaDataToCache code:" << toString(code_) << ", msg:" << errMagBuf_ << endl; + return code_; + } + + code_ = parseInsertSql(&cxt_, &res_); + if (code_ != TSDB_CODE_SUCCESS) { + cout << "parseInsertSql code:" << toString(code_) << ", msg:" << errMagBuf_ << endl; + return code_; + } + + return code_; + } + void dumpReslut() { SVnodeModifOpStmt* pStmt = getVnodeModifStmt(res_); size_t num = taosArrayGetSize(pStmt->pDataBlocks); @@ -125,7 +158,7 @@ class InsertTest : public Test { SQuery* res_; }; -// INSERT INTO tb_name VALUES (field1_value, ...) +// INSERT INTO tb_name [(field1_name, ...)] VALUES (field1_value, ...) TEST_F(InsertTest, singleTableSingleRowTest) { setDatabase("root", "test"); @@ -133,6 +166,17 @@ TEST_F(InsertTest, singleTableSingleRowTest) { ASSERT_EQ(run(), TSDB_CODE_SUCCESS); dumpReslut(); checkReslut(1, 1); + + bind("insert into t1 (ts, c1, c2, c3, c4, c5) values (now, 1, 'beijing', 3, 4, 5)"); + ASSERT_EQ(run(), TSDB_CODE_SUCCESS); + + bind("insert into t1 values (now, 1, 'beijing', 3, 4, 5)"); + ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); + dumpReslut(); + checkReslut(1, 1); + + bind("insert into t1 (ts, c1, c2, c3, c4, c5) values (now, 1, 'beijing', 3, 4, 5)"); + ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); } // INSERT INTO tb_name VALUES (field1_value, ...)(field1_value, ...) @@ -140,11 +184,16 @@ TEST_F(InsertTest, singleTableMultiRowTest) { setDatabase("root", "test"); bind( - "insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)(now+2s, 3, 'guangzhou', 9, " - "10, 11)"); + "insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)" + "(now+2s, 3, 'guangzhou', 9, 10, 11)"); ASSERT_EQ(run(), TSDB_CODE_SUCCESS); dumpReslut(); checkReslut(1, 3); + + bind( + "insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)" + "(now+2s, 3, 'guangzhou', 9, 10, 11)"); + ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); } // INSERT INTO tb1_name VALUES (field1_value, ...) tb2_name VALUES (field1_value, ...) @@ -155,6 +204,9 @@ TEST_F(InsertTest, multiTableSingleRowTest) { ASSERT_EQ(run(), TSDB_CODE_SUCCESS); dumpReslut(); checkReslut(2, 1); + + bind("insert into st1s1 values (now, 1, \"beijing\") st1s2 values (now, 10, \"131028\")"); + ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); } // INSERT INTO tb1_name VALUES (field1_value, ...) tb2_name VALUES (field1_value, ...) 
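[Reviewer note — commentary, not part of the patch] The runAsync() helper added above exercises the two-phase "async" parse flow this PR introduces for INSERT statements: a syntax-only pass first collects the metadata keys the statement needs, the caller fetches all of that metadata in one batch, hands it back to the parser's meta cache, and only then runs the full parse. A minimal sketch of that flow, using only the calls visible in these hunks (error handling elided; the mock catalog stands in for what would be an asynchronous catalog round-trip in production):

    SQuery*     pQuery = NULL;
    SCatalogReq catalogReq = {0};
    SMetaData   metaData = {0};
    parseInsertSyntax(&cxt, &pQuery);                                 // phase 1: syntax check + meta-key collection
    buildCatalogReq(pQuery->pMetaCache, &catalogReq);                 // batch the collected keys into one request
    g_mockCatalogService->catalogGetAllMeta(&catalogReq, &metaData);  // fetch all requested metadata at once
    putMetaDataToCache(&catalogReq, &metaData, pQuery->pMetaCache);   // hand the results back to the parser
    parseInsertSql(&cxt, &pQuery);                                    // phase 2: full parse against the cached meta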
@@ -167,6 +219,11 @@ TEST_F(InsertTest, multiTableMultiRowTest) { ASSERT_EQ(run(), TSDB_CODE_SUCCESS); dumpReslut(); checkReslut(2, 3, 2); + + bind( + "insert into st1s1 values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")" + " st1s2 values (now, 10, \"131028\")(now+1s, 20, \"132028\")"); + ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); } // INSERT INTO @@ -181,6 +238,21 @@ TEST_F(InsertTest, autoCreateTableTest) { ASSERT_EQ(run(), TSDB_CODE_SUCCESS); dumpReslut(); checkReslut(1, 3); + + bind( + "insert into st1s1 using st1 (tag1, tag2) tags(1, 'wxy') values (now, 1, \"beijing\")" + "(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")"); + ASSERT_EQ(run(), TSDB_CODE_SUCCESS); + + bind( + "insert into st1s1 using st1 tags(1, 'wxy') values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, " + "\"guangzhou\")"); + ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); + + bind( + "insert into st1s1 using st1 (tag1, tag2) tags(1, 'wxy') values (now, 1, \"beijing\")" + "(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")"); + ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); } TEST_F(InsertTest, toleranceTest) { @@ -190,4 +262,9 @@ TEST_F(InsertTest, toleranceTest) { ASSERT_NE(run(), TSDB_CODE_SUCCESS); bind("insert into t"); ASSERT_NE(run(), TSDB_CODE_SUCCESS); + + bind("insert into"); + ASSERT_NE(runAsync(), TSDB_CODE_SUCCESS); + bind("insert into t"); + ASSERT_NE(runAsync(), TSDB_CODE_SUCCESS); } diff --git a/source/libs/parser/test/parSelectTest.cpp b/source/libs/parser/test/parSelectTest.cpp index b68ef2c591e0497c6f32a9ce69c9e1f229b5f92f..a675bb936fb42271ec474027e7c31a8c2ad21652 100644 --- a/source/libs/parser/test/parSelectTest.cpp +++ b/source/libs/parser/test/parSelectTest.cpp @@ -44,6 +44,8 @@ TEST_F(ParserSelectTest, constant) { "timestamp '2022-02-09 17:30:20', true, false, 15s FROM t1"); run("SELECT 123 + 45 FROM t1 WHERE 2 - 1"); + + run("SELECT * FROM t1 WHERE -2"); } TEST_F(ParserSelectTest, expression) { @@ -76,6 +78,12 @@ TEST_F(ParserSelectTest, pseudoColumnSemanticCheck) { run("SELECT TBNAME FROM (SELECT * FROM st1s1)", TSDB_CODE_PAR_INVALID_TBNAME, PARSER_STAGE_TRANSLATE); } +TEST_F(ParserSelectTest, aggFunc) { + useDb("root", "test"); + + run("SELECT LEASTSQUARES(c1, -1, 1) FROM t1"); +} + TEST_F(ParserSelectTest, multiResFunc) { useDb("root", "test"); @@ -121,13 +129,13 @@ TEST_F(ParserSelectTest, selectFunc) { run("SELECT MAX(c1), c2 FROM t1 STATE_WINDOW(c3)"); } -TEST_F(ParserSelectTest, nonstdFunc) { +TEST_F(ParserSelectTest, IndefiniteRowsFunc) { useDb("root", "test"); run("SELECT DIFF(c1) FROM t1"); } -TEST_F(ParserSelectTest, nonstdFuncSemanticCheck) { +TEST_F(ParserSelectTest, IndefiniteRowsFuncSemanticCheck) { useDb("root", "test"); run("SELECT DIFF(c1), c2 FROM t1", TSDB_CODE_PAR_NOT_ALLOWED_FUNC, PARSER_STAGE_TRANSLATE); @@ -141,6 +149,14 @@ TEST_F(ParserSelectTest, nonstdFuncSemanticCheck) { // run("SELECT DIFF(c1) FROM t1 INTERVAL(10s)"); } +TEST_F(ParserSelectTest, useDefinedFunc) { + useDb("root", "test"); + + run("SELECT udf1(c1) FROM t1"); + + run("SELECT udf2(c1) FROM t1 GROUP BY c2"); +} + TEST_F(ParserSelectTest, groupBy) { useDb("root", "test"); diff --git a/source/libs/parser/test/parTestMain.cpp b/source/libs/parser/test/parTestMain.cpp index ebc83fb21981e56666b82ec6a5a08a63cd7f0c87..820b8cca3cdc02633982a3ea797aa605db1e3fd3 100644 --- a/source/libs/parser/test/parTestMain.cpp +++ b/source/libs/parser/test/parTestMain.cpp @@ -37,6 +37,7 @@ class ParserEnv : public testing::Environment { virtual void SetUp() { initMetaDataEnv(); 
generateMetaData(); + initLog(TD_TMP_DIR_PATH "td"); } virtual void TearDown() { @@ -47,16 +48,55 @@ class ParserEnv : public testing::Environment { ParserEnv() {} virtual ~ParserEnv() {} + + private: + void initLog(const char* path) { + int32_t logLevel = getLogLevel(); + dDebugFlag = logLevel; + vDebugFlag = logLevel; + mDebugFlag = logLevel; + cDebugFlag = logLevel; + jniDebugFlag = logLevel; + tmrDebugFlag = logLevel; + uDebugFlag = logLevel; + rpcDebugFlag = logLevel; + qDebugFlag = logLevel; + wDebugFlag = logLevel; + sDebugFlag = logLevel; + tsdbDebugFlag = logLevel; + tsLogEmbedded = 1; + tsAsyncLog = 0; + + taosRemoveDir(path); + taosMkDir(path); + tstrncpy(tsLogDir, path, PATH_MAX); + if (taosInitLog("taoslog", 1) != 0) { + std::cout << "failed to init log file" << std::endl; + } + } }; static void parseArg(int argc, char* argv[]) { - int opt = 0; - const char* optstring = ""; - static struct option long_options[] = {{"dump", no_argument, NULL, 'd'}, {0, 0, 0, 0}}; + int opt = 0; + const char* optstring = ""; + // clang-format off + static struct option long_options[] = { + {"dump", no_argument, NULL, 'd'}, + {"async", required_argument, NULL, 'a'}, + {"skipSql", required_argument, NULL, 's'}, + {0, 0, 0, 0} + }; + // clang-format on while ((opt = getopt_long(argc, argv, optstring, long_options, NULL)) != -1) { switch (opt) { case 'd': - g_isDump = true; + g_dump = true; + break; + case 'a': + setAsyncFlag(optarg); + break; + case 's': + setSkipSqlNum(optarg); break; default: break; diff --git a/source/libs/parser/test/parTestUtil.cpp b/source/libs/parser/test/parTestUtil.cpp index 250ac1c52885f10d45a4ef96321d410f115b9255..fab7ed35b1cb408a5cdd6f455994da07a26596fd 100644 --- a/source/libs/parser/test/parTestUtil.cpp +++ b/source/libs/parser/test/parTestUtil.cpp @@ -17,7 +17,10 @@ #include #include +#include +#include "catalog.h" +#include "mockCatalogService.h" #include "parInt.h" using namespace std; @@ -41,22 +44,40 @@ namespace ParserTest { } \ } while (0); -bool g_isDump = false; +bool g_dump = false; +bool g_testAsyncApis = true; +int32_t g_logLevel = 131; +int32_t g_skipSql = 0; + +void setAsyncFlag(const char* pFlag) { g_testAsyncApis = stoi(pFlag) > 0 ? 
true : false; } +void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(pNum); } struct TerminateFlag : public exception { const char* what() const throw() { return "success and terminate"; } }; +void setLogLevel(const char* pLogLevel) { g_logLevel = stoi(pLogLevel); } + +int32_t getLogLevel() { return g_logLevel; } + class ParserTestBaseImpl { public: ParserTestBaseImpl(ParserTestBase* pBase) : pBase_(pBase) {} + void login(const std::string& user) { caseEnv_.user_ = user; } + void useDb(const string& acctId, const string& db) { caseEnv_.acctId_ = acctId; caseEnv_.db_ = db; + caseEnv_.nsql_ = g_skipSql; } void run(const string& sql, int32_t expect, ParserStage checkStage) { + if (caseEnv_.nsql_ > 0) { + --(caseEnv_.nsql_); + return; + } + reset(expect, checkStage); try { SParseContext cxt = {0}; @@ -65,11 +86,13 @@ class ParserTestBaseImpl { SQuery* pQuery = nullptr; doParse(&cxt, &pQuery); + doAuthenticate(&cxt, pQuery); + doTranslate(&cxt, pQuery); doCalculateConstant(&cxt, pQuery); - if (g_isDump) { + if (g_dump) { dump(); } } catch (const TerminateFlag& e) { @@ -79,12 +102,20 @@ class ParserTestBaseImpl { dump(); throw; } + + if (g_testAsyncApis) { + runAsync(sql, expect, checkStage); + } } private: struct caseEnv { - string acctId_; - string db_; + string acctId_; + string user_; + string db_; + int32_t nsql_; + + caseEnv() : user_("wangxiaoyu"), nsql_(0) {} }; struct stmtEnv { @@ -144,16 +175,19 @@ class ParserTestBaseImpl { cout << res_.calcConstAst_ << endl; } - void setParseContext(const string& sql, SParseContext* pCxt) { + void setParseContext(const string& sql, SParseContext* pCxt, bool async = false) { stmtEnv_.sql_ = sql; transform(stmtEnv_.sql_.begin(), stmtEnv_.sql_.end(), stmtEnv_.sql_.begin(), ::tolower); pCxt->acctId = atoi(caseEnv_.acctId_.c_str()); pCxt->db = caseEnv_.db_.c_str(); + pCxt->pUser = caseEnv_.user_.c_str(); + pCxt->isSuperUser = caseEnv_.user_ == "root"; pCxt->pSql = stmtEnv_.sql_.c_str(); pCxt->sqlLen = stmtEnv_.sql_.length(); pCxt->pMsg = stmtEnv_.msgBuf_.data(); pCxt->msgLen = stmtEnv_.msgBuf_.max_size(); + pCxt->async = async; } void doParse(SParseContext* pCxt, SQuery** pQuery) { @@ -162,6 +196,25 @@ class ParserTestBaseImpl { res_.parsedAst_ = toString((*pQuery)->pRoot); } + void doCollectMetaKey(SParseContext* pCxt, SQuery* pQuery) { + DO_WITH_THROW(collectMetaKey, pCxt, pQuery); + ASSERT_NE(pQuery->pMetaCache, nullptr); + } + + void doBuildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) { + DO_WITH_THROW(buildCatalogReq, pMetaCache, pCatalogReq); + } + + void doGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) { + DO_WITH_THROW(g_mockCatalogService->catalogGetAllMeta, pCatalogReq, pMetaData); + } + + void doPutMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache) { + DO_WITH_THROW(putMetaDataToCache, pCatalogReq, pMetaData, pMetaCache); + } + + void doAuthenticate(SParseContext* pCxt, SQuery* pQuery) { DO_WITH_THROW(authenticate, pCxt, pQuery); } + void doTranslate(SParseContext* pCxt, SQuery* pQuery) { DO_WITH_THROW(translate, pCxt, pQuery); checkQuery(pQuery, PARSER_STAGE_TRANSLATE); @@ -184,6 +237,59 @@ class ParserTestBaseImpl { void checkQuery(const SQuery* pQuery, ParserStage stage) { pBase_->checkDdl(pQuery, stage); } + void runAsync(const string& sql, int32_t expect, ParserStage checkStage) { + reset(expect, checkStage); + try { + SParseContext cxt = {0}; + setParseContext(sql, &cxt, true); + + SQuery* pQuery = nullptr; + doParse(&cxt, &pQuery); + + 
doCollectMetaKey(&cxt, pQuery); + + SCatalogReq catalogReq = {0}; + doBuildCatalogReq(pQuery->pMetaCache, &catalogReq); + + string err; + thread t1([&]() { + try { + SMetaData metaData = {0}; + doGetAllMeta(&catalogReq, &metaData); + + doPutMetaDataToCache(&catalogReq, &metaData, pQuery->pMetaCache); + + doAuthenticate(&cxt, pQuery); + + doTranslate(&cxt, pQuery); + + doCalculateConstant(&cxt, pQuery); + } catch (const TerminateFlag& e) { + // success and terminate + } catch (const runtime_error& e) { + err = e.what(); + } catch (...) { + err = "unknown error"; + } + }); + + t1.join(); + if (!err.empty()) { + throw runtime_error(err); + } + + if (g_dump) { + dump(); + } + } catch (const TerminateFlag& e) { + // success and terminate + return; + } catch (...) { + dump(); + throw; + } + } + caseEnv caseEnv_; stmtEnv stmtEnv_; stmtRes res_; @@ -194,6 +300,8 @@ ParserTestBase::ParserTestBase() : impl_(new ParserTestBaseImpl(this)) {} ParserTestBase::~ParserTestBase() {} +void ParserTestBase::login(const std::string& user) { return impl_->login(user); } + void ParserTestBase::useDb(const std::string& acctId, const std::string& db) { impl_->useDb(acctId, db); } void ParserTestBase::run(const std::string& sql, int32_t expect, ParserStage checkStage) { diff --git a/source/libs/parser/test/parTestUtil.h b/source/libs/parser/test/parTestUtil.h index c7d7ead8dbc8a5d6b7a45cde0552e9e979ea07ec..44be7a24746ecde078f69555c88e4d85344b8313 100644 --- a/source/libs/parser/test/parTestUtil.h +++ b/source/libs/parser/test/parTestUtil.h @@ -34,6 +34,7 @@ class ParserTestBase : public testing::Test { ParserTestBase(); virtual ~ParserTestBase(); + void login(const std::string& user); void useDb(const std::string& acctId, const std::string& db); void run(const std::string& sql, int32_t expect = TSDB_CODE_SUCCESS, ParserStage checkStage = PARSER_STAGE_ALL); @@ -63,7 +64,12 @@ class ParserDdlTest : public ParserTestBase { std::function checkDdl_; }; -extern bool g_isDump; +extern bool g_dump; + +extern void setAsyncFlag(const char* pFlag); +extern void setLogLevel(const char* pLogLevel); +extern int32_t getLogLevel(); +extern void setSkipSqlNum(const char* pNum); } // namespace ParserTest diff --git a/source/libs/planner/CMakeLists.txt b/source/libs/planner/CMakeLists.txt index f0bf32bf173b2731e49636dfdca27b8907996844..ad981073ca7fa7ff26129783a5e337155e2e020d 100644 --- a/source/libs/planner/CMakeLists.txt +++ b/source/libs/planner/CMakeLists.txt @@ -8,7 +8,7 @@ target_include_directories( target_link_libraries( planner - PRIVATE os util nodes catalog cjson parser function qcom scalar + PRIVATE os util nodes catalog cjson parser function qcom scalar index PUBLIC transport ) diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index 6c567fd4ab90729277532b6e95f9d55ba1e787d2..467b26b7c4af61a8f0cca3d706f34c0133995fe3 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -321,6 +321,7 @@ static int32_t createJoinLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect } pJoin->joinType = pJoinTable->joinType; + pJoin->isSingleTableJoin = pJoinTable->table.singleTable; int32_t code = TSDB_CODE_SUCCESS; diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 8645225c04bc82e1ffa9c36db0ab482a8dd6b5a3..adc07fcd0d6b6a0c6f98fdf5032151dab3ae71f3 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -15,6 +15,7 @@ #include "filter.h" 
#include "functionMgt.h" +#include "index.h" #include "planInt.h" #define OPTIMIZE_FLAG_MASK(n) (1 << n) @@ -222,6 +223,9 @@ static void setScanWindowInfo(SScanLogicNode* pScan) { pScan->sliding = ((SWindowLogicNode*)pScan->node.pParent)->sliding; pScan->intervalUnit = ((SWindowLogicNode*)pScan->node.pParent)->intervalUnit; pScan->slidingUnit = ((SWindowLogicNode*)pScan->node.pParent)->slidingUnit; + pScan->triggerType = ((SWindowLogicNode*)pScan->node.pParent)->triggerType; + pScan->watermark = ((SWindowLogicNode*)pScan->node.pParent)->watermark; + pScan->tsColId = ((SColumnNode*)((SWindowLogicNode*)pScan->node.pParent)->pTspk)->colId; } } @@ -313,22 +317,53 @@ static EDealRes cpdIsPrimaryKeyCondImpl(SNode* pNode, void* pContext) { } static bool cpdIsPrimaryKeyCond(SNode* pNode) { + if (QUERY_NODE_LOGIC_CONDITION == nodeType(pNode)) { + return false; + } bool isPrimaryKeyCond = false; nodesWalkExpr(pNode, cpdIsPrimaryKeyCondImpl, &isPrimaryKeyCond); return isPrimaryKeyCond; } -static int32_t cpdPartitionScanLogicCond(SScanLogicNode* pScan, SNode** pPrimaryKeyCond, SNode** pOtherCond) { +static EDealRes cpdIsTagCondImpl(SNode* pNode, void* pContext) { + if (QUERY_NODE_COLUMN == nodeType(pNode)) { + *((bool*)pContext) = ((COLUMN_TYPE_TAG == ((SColumnNode*)pNode)->colType) ? true : false); + return *((bool*)pContext) ? DEAL_RES_CONTINUE : DEAL_RES_END; + } + return DEAL_RES_CONTINUE; +} + +static bool cpdIsTagCond(SNode* pNode) { + if (QUERY_NODE_LOGIC_CONDITION == nodeType(pNode)) { + return false; + } + bool isTagCond = false; + nodesWalkExpr(pNode, cpdIsTagCondImpl, &isTagCond); + return isTagCond; +} + +static int32_t cpdPartitionScanLogicCond(SScanLogicNode* pScan, SNode** pPrimaryKeyCond, SNode** pTagCond, + SNode** pOtherCond) { SLogicConditionNode* pLogicCond = (SLogicConditionNode*)pScan->node.pConditions; + if (LOGIC_COND_TYPE_AND != pLogicCond->condType) { + *pPrimaryKeyCond = NULL; + *pOtherCond = pScan->node.pConditions; + pScan->node.pConditions = NULL; + return TSDB_CODE_SUCCESS; + } + int32_t code = TSDB_CODE_SUCCESS; SNodeList* pPrimaryKeyConds = NULL; + SNodeList* pTagConds = NULL; SNodeList* pOtherConds = NULL; SNode* pCond = NULL; FOREACH(pCond, pLogicCond->pParameterList) { if (cpdIsPrimaryKeyCond(pCond)) { code = nodesListMakeAppend(&pPrimaryKeyConds, nodesCloneNode(pCond)); + } else if (cpdIsTagCond(pScan->node.pConditions)) { + code = nodesListMakeAppend(&pTagConds, nodesCloneNode(pCond)); } else { code = nodesListMakeAppend(&pOtherConds, nodesCloneNode(pCond)); } @@ -338,37 +373,46 @@ static int32_t cpdPartitionScanLogicCond(SScanLogicNode* pScan, SNode** pPrimary } SNode* pTempPrimaryKeyCond = NULL; + SNode* pTempTagCond = NULL; SNode* pTempOtherCond = NULL; if (TSDB_CODE_SUCCESS == code) { code = cpdMergeConds(&pTempPrimaryKeyCond, &pPrimaryKeyConds); } + if (TSDB_CODE_SUCCESS == code) { + code = cpdMergeConds(&pTempTagCond, &pTagConds); + } if (TSDB_CODE_SUCCESS == code) { code = cpdMergeConds(&pTempOtherCond, &pOtherConds); } if (TSDB_CODE_SUCCESS == code) { *pPrimaryKeyCond = pTempPrimaryKeyCond; + *pTagCond = pTempTagCond; *pOtherCond = pTempOtherCond; nodesDestroyNode(pScan->node.pConditions); pScan->node.pConditions = NULL; } else { nodesDestroyList(pPrimaryKeyConds); + nodesDestroyList(pTagConds); nodesDestroyList(pOtherConds); nodesDestroyNode(pTempPrimaryKeyCond); + nodesDestroyNode(pTempTagCond); nodesDestroyNode(pTempOtherCond); } return code; } -static int32_t cpdPartitionScanCond(SScanLogicNode* pScan, SNode** pPrimaryKeyCond, SNode** pOtherCond) { - 
if (QUERY_NODE_LOGIC_CONDITION == nodeType(pScan->node.pConditions) && - LOGIC_COND_TYPE_AND == ((SLogicConditionNode*)pScan->node.pConditions)->condType) { - return cpdPartitionScanLogicCond(pScan, pPrimaryKeyCond, pOtherCond); +static int32_t cpdPartitionScanCond(SScanLogicNode* pScan, SNode** pPrimaryKeyCond, SNode** pTagCond, + SNode** pOtherCond) { + if (QUERY_NODE_LOGIC_CONDITION == nodeType(pScan->node.pConditions)) { + return cpdPartitionScanLogicCond(pScan, pPrimaryKeyCond, pTagCond, pOtherCond); } if (cpdIsPrimaryKeyCond(pScan->node.pConditions)) { *pPrimaryKeyCond = pScan->node.pConditions; + } else if (cpdIsTagCond(pScan->node.pConditions)) { + *pTagCond = pScan->node.pConditions; } else { *pOtherCond = pScan->node.pConditions; } @@ -391,6 +435,32 @@ static int32_t cpdCalcTimeRange(SScanLogicNode* pScan, SNode** pPrimaryKeyCond, return code; } +static int32_t cpdApplyTagIndex(SScanLogicNode* pScan, SNode** pTagCond, SNode** pOtherCond) { + int32_t code = TSDB_CODE_SUCCESS; + SIdxFltStatus idxStatus = idxGetFltStatus(*pTagCond); + switch (idxStatus) { + case SFLT_NOT_INDEX: + code = cpdCondAppend(pOtherCond, pTagCond); + break; + case SFLT_COARSE_INDEX: + pScan->pTagCond = nodesCloneNode(*pTagCond); + if (NULL == pScan->pTagCond) { + code = TSDB_CODE_OUT_OF_MEMORY; + break; + } + code = cpdCondAppend(pOtherCond, pTagCond); + break; + case SFLT_ACCURATE_INDEX: + pScan->pTagCond = *pTagCond; + *pTagCond = NULL; + break; + default: + code = TSDB_CODE_FAILED; + break; + } + return code; +} + static int32_t cpdOptimizeScanCondition(SOptimizeContext* pCxt, SScanLogicNode* pScan) { if (NULL == pScan->node.pConditions || OPTIMIZE_FLAG_TEST_MASK(pScan->node.optimizedFlag, OPTIMIZE_FLAG_CPD) || TSDB_SYSTEM_TABLE == pScan->pMeta->tableType) { @@ -398,11 +468,15 @@ static int32_t cpdOptimizeScanCondition(SOptimizeContext* pCxt, SScanLogicNode* } SNode* pPrimaryKeyCond = NULL; + SNode* pTagCond = NULL; SNode* pOtherCond = NULL; - int32_t code = cpdPartitionScanCond(pScan, &pPrimaryKeyCond, &pOtherCond); + int32_t code = cpdPartitionScanCond(pScan, &pPrimaryKeyCond, &pTagCond, &pOtherCond); if (TSDB_CODE_SUCCESS == code && NULL != pPrimaryKeyCond) { code = cpdCalcTimeRange(pScan, &pPrimaryKeyCond, &pOtherCond); } + if (TSDB_CODE_SUCCESS == code && NULL != pTagCond) { + code = cpdApplyTagIndex(pScan, &pTagCond, &pOtherCond); + } if (TSDB_CODE_SUCCESS == code) { pScan->node.pConditions = pOtherCond; } @@ -618,30 +692,6 @@ static bool cpdContainPrimaryKeyEqualCond(SJoinLogicNode* pJoin, SNode* pCond) { } } -// static int32_t cpdCheckOpCond(SOptimizeContext* pCxt, SJoinLogicNode* pJoin, SNode* pOnCond) { -// if (!cpdIsPrimaryKeyEqualCond(pJoin, pOnCond)) { -// return generateUsageErrMsg(pCxt->pPlanCxt->pMsg, pCxt->pPlanCxt->msgLen, TSDB_CODE_PLAN_EXPECTED_TS_EQUAL); -// } -// return TSDB_CODE_SUCCESS; -// } - -// static int32_t cpdCheckLogicCond(SOptimizeContext* pCxt, SJoinLogicNode* pJoin, SLogicConditionNode* pOnCond) { -// if (LOGIC_COND_TYPE_AND != pOnCond->condType) { -// return generateUsageErrMsg(pCxt->pPlanCxt->pMsg, pCxt->pPlanCxt->msgLen, TSDB_CODE_PLAN_EXPECTED_TS_EQUAL); -// } -// bool hasPrimaryKeyEqualCond = false; -// SNode* pCond = NULL; -// FOREACH(pCond, pOnCond->pParameterList) { -// if (cpdIsPrimaryKeyEqualCond(pJoin, pCond)) { -// hasPrimaryKeyEqualCond = true; -// } -// } -// if (!hasPrimaryKeyEqualCond) { -// return generateUsageErrMsg(pCxt->pPlanCxt->pMsg, pCxt->pPlanCxt->msgLen, TSDB_CODE_PLAN_EXPECTED_TS_EQUAL); -// } -// return TSDB_CODE_SUCCESS; -// } - static int32_t 
cpdCheckJoinOnCond(SOptimizeContext* pCxt, SJoinLogicNode* pJoin) { if (NULL == pJoin->pOnConditions) { return generateUsageErrMsg(pCxt->pPlanCxt->pMsg, pCxt->pPlanCxt->msgLen, TSDB_CODE_PLAN_NOT_SUPPORT_CROSS_JOIN); @@ -650,11 +700,6 @@ static int32_t cpdCheckJoinOnCond(SOptimizeContext* pCxt, SJoinLogicNode* pJoin) return generateUsageErrMsg(pCxt->pPlanCxt->pMsg, pCxt->pPlanCxt->msgLen, TSDB_CODE_PLAN_EXPECTED_TS_EQUAL); } return TSDB_CODE_SUCCESS; - // if (QUERY_NODE_LOGIC_CONDITION == nodeType(pJoin->pOnConditions)) { - // return cpdCheckLogicCond(pCxt, pJoin, (SLogicConditionNode*)pJoin->pOnConditions); - // } else { - // return cpdCheckOpCond(pCxt, pJoin, pJoin->pOnConditions); - // } } static int32_t cpdPushJoinCondition(SOptimizeContext* pCxt, SJoinLogicNode* pJoin) { diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index affe9ef2f61f0afce95ba7f1feec5f65f23a7f1f..a45eabefb9f1f1f7fe9c97a3f8c7cf16385d2fc3 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -411,7 +411,7 @@ static int32_t createScanCols(SPhysiPlanContext* pCxt, SScanPhysiNode* pScanPhys return sortScanCols(pScanPhysiNode->pScanCols); } -static int32_t createScanPhysiNodeFinalize(SPhysiPlanContext* pCxt, SScanLogicNode* pScanLogicNode, +static int32_t createScanPhysiNodeFinalize(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode, SScanPhysiNode* pScanPhysiNode, SPhysiNode** pPhyNode) { int32_t code = createScanCols(pCxt, pScanPhysiNode, pScanLogicNode->pScanCols); if (TSDB_CODE_SUCCESS == code) { @@ -438,6 +438,12 @@ static int32_t createScanPhysiNodeFinalize(SPhysiPlanContext* pCxt, SScanLogicNo pScanPhysiNode->uid = pScanLogicNode->pMeta->uid; pScanPhysiNode->tableType = pScanLogicNode->pMeta->tableType; memcpy(&pScanPhysiNode->tableName, &pScanLogicNode->tableName, sizeof(SName)); + if (NULL != pScanLogicNode->pTagCond) { + pSubplan->pTagCond = nodesCloneNode(pScanLogicNode->pTagCond); + if (NULL == pSubplan->pTagCond) { + code = TSDB_CODE_OUT_OF_MEMORY; + } + } } if (TSDB_CODE_SUCCESS == code) { @@ -463,7 +469,7 @@ static int32_t createTagScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubpla } vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode); taosArrayPush(pCxt->pExecNodeList, &pSubplan->execNode); - return createScanPhysiNodeFinalize(pCxt, pScanLogicNode, (SScanPhysiNode*)pTagScan, pPhyNode); + return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTagScan, pPhyNode); } static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode, @@ -497,8 +503,11 @@ static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubp pTableScan->sliding = pScanLogicNode->sliding; pTableScan->intervalUnit = pScanLogicNode->intervalUnit; pTableScan->slidingUnit = pScanLogicNode->slidingUnit; + pTableScan->triggerType = pScanLogicNode->triggerType; + pTableScan->watermark = pScanLogicNode->watermark; + pTableScan->tsColId = pScanLogicNode->tsColId; - return createScanPhysiNodeFinalize(pCxt, pScanLogicNode, (SScanPhysiNode*)pTableScan, pPhyNode); + return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTableScan, pPhyNode); } static int32_t createSystemTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, @@ -522,7 +531,7 @@ static int32_t createSystemTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pScan->mgmtEpSet = 
pCxt->pPlanCxt->mgmtEpSet; tNameGetFullDbName(&pScanLogicNode->tableName, pSubplan->dbFName); - return createScanPhysiNodeFinalize(pCxt, pScanLogicNode, (SScanPhysiNode*)pScan, pPhyNode); + return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pScan, pPhyNode); } static int32_t createStreamScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode, @@ -599,14 +608,17 @@ typedef struct SRewritePrecalcExprsCxt { static EDealRes collectAndRewrite(SRewritePrecalcExprsCxt* pCxt, SNode** pNode) { SNode* pExpr = nodesCloneNode(*pNode); if (NULL == pExpr) { + pCxt->errCode = TSDB_CODE_OUT_OF_MEMORY; return DEAL_RES_ERROR; } if (nodesListAppend(pCxt->pPrecalcExprs, pExpr)) { + pCxt->errCode = TSDB_CODE_OUT_OF_MEMORY; nodesDestroyNode(pExpr); return DEAL_RES_ERROR; } SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); if (NULL == pCol) { + pCxt->errCode = TSDB_CODE_OUT_OF_MEMORY; nodesDestroyNode(pExpr); return DEAL_RES_ERROR; } @@ -624,16 +636,45 @@ static EDealRes collectAndRewrite(SRewritePrecalcExprsCxt* pCxt, SNode** pNode) return DEAL_RES_IGNORE_CHILD; } +static int32_t rewriteValueToOperator(SRewritePrecalcExprsCxt* pCxt, SNode** pNode) { + SOperatorNode* pOper = (SOperatorNode*)nodesMakeNode(QUERY_NODE_OPERATOR); + if (NULL == pOper) { + return TSDB_CODE_OUT_OF_MEMORY; + } + pOper->pLeft = nodesMakeNode(QUERY_NODE_LEFT_VALUE); + if (NULL == pOper->pLeft) { + nodesDestroyNode(pOper); + return TSDB_CODE_OUT_OF_MEMORY; + } + SValueNode* pVal = (SValueNode*)*pNode; + pOper->node.resType = pVal->node.resType; + strcpy(pOper->node.aliasName, pVal->node.aliasName); + pOper->opType = OP_TYPE_ASSIGN; + pOper->pRight = *pNode; + *pNode = (SNode*)pOper; + return TSDB_CODE_SUCCESS; +} + static EDealRes doRewritePrecalcExprs(SNode** pNode, void* pContext) { SRewritePrecalcExprsCxt* pCxt = (SRewritePrecalcExprsCxt*)pContext; switch (nodeType(*pNode)) { + case QUERY_NODE_VALUE: { + if (((SValueNode*)*pNode)->notReserved) { + break; + } + pCxt->errCode = rewriteValueToOperator(pCxt, pNode); + if (TSDB_CODE_SUCCESS != pCxt->errCode) { + return DEAL_RES_ERROR; + } + return collectAndRewrite(pCxt, pNode); + } case QUERY_NODE_OPERATOR: case QUERY_NODE_LOGIC_CONDITION: { - return collectAndRewrite(pContext, pNode); + return collectAndRewrite(pCxt, pNode); } case QUERY_NODE_FUNCTION: { if (fmIsScalarFunc(((SFunctionNode*)(*pNode))->funcId)) { - return collectAndRewrite(pContext, pNode); + return collectAndRewrite(pCxt, pNode); } } default: @@ -677,9 +718,8 @@ static int32_t rewritePrecalcExprs(SPhysiPlanContext* pCxt, SNodeList* pList, SN } SRewritePrecalcExprsCxt cxt = {.errCode = TSDB_CODE_SUCCESS, .pPrecalcExprs = *pPrecalcExprs}; nodesRewriteExprs(*pRewrittenList, doRewritePrecalcExprs, &cxt); - if (0 == LIST_LENGTH(cxt.pPrecalcExprs)) { - nodesDestroyList(cxt.pPrecalcExprs); - *pPrecalcExprs = NULL; + if (0 == LIST_LENGTH(cxt.pPrecalcExprs) || TSDB_CODE_SUCCESS != cxt.errCode) { + DESTORY_LIST(*pPrecalcExprs); } return cxt.errCode; } @@ -908,7 +948,8 @@ static int32_t createIntervalPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChil static int32_t createSessionWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SWindowLogicNode* pWindowLogicNode, SPhysiNode** pPhyNode) { SSessionWinodwPhysiNode* pSession = (SSessionWinodwPhysiNode*)makePhysiNode( - pCxt, getPrecision(pChildren), (SLogicNode*)pWindowLogicNode, QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW); + pCxt, getPrecision(pChildren), (SLogicNode*)pWindowLogicNode, + 
(pCxt->pPlanCxt->streamQuery ? QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW : QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW)); if (NULL == pSession) { return TSDB_CODE_OUT_OF_MEMORY; } diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index 1a97d9ab1b898b9b4a5dae1d4bade0e9b6d87bb8..ea149f8363955233fc45eb60a7d71378c8198d17 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -18,7 +18,6 @@ #define SPLIT_FLAG_MASK(n) (1 << n) #define SPLIT_FLAG_STS SPLIT_FLAG_MASK(0) -#define SPLIT_FLAG_CTJ SPLIT_FLAG_MASK(1) #define SPLIT_FLAG_SET_MASK(val, mask) (val) |= (mask) #define SPLIT_FLAG_TEST_MASK(val, mask) (((val) & (mask)) != 0) @@ -42,7 +41,8 @@ typedef struct SStsInfo { } SStsInfo; typedef struct SCtjInfo { - SScanLogicNode* pScan; + SJoinLogicNode* pJoin; + SLogicNode* pSplitNode; SLogicSubplan* pSubplan; } SCtjInfo; @@ -58,7 +58,7 @@ typedef struct SUnInfo { typedef bool (*FSplFindSplitNode)(SLogicSubplan* pSubplan, void* pInfo); -static SLogicSubplan* splCreateScanSubplan(SSplitContext* pCxt, SScanLogicNode* pScan, int32_t flag) { +static SLogicSubplan* splCreateSubplan(SSplitContext* pCxt, SLogicNode* pNode, int32_t flag) { SLogicSubplan* pSubplan = nodesMakeNode(QUERY_NODE_LOGIC_SUBPLAN); if (NULL == pSubplan) { return NULL; @@ -66,35 +66,37 @@ static SLogicSubplan* splCreateScanSubplan(SSplitContext* pCxt, SScanLogicNode* pSubplan->id.queryId = pCxt->queryId; pSubplan->id.groupId = pCxt->groupId; pSubplan->subplanType = SUBPLAN_TYPE_SCAN; - pSubplan->pNode = (SLogicNode*)nodesCloneNode(pScan); - TSWAP(pSubplan->pVgroupList, ((SScanLogicNode*)pSubplan->pNode)->pVgroupList); + pSubplan->pNode = (SLogicNode*)nodesCloneNode(pNode); + if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pNode)) { + TSWAP(pSubplan->pVgroupList, ((SScanLogicNode*)pSubplan->pNode)->pVgroupList); + } SPLIT_FLAG_SET_MASK(pSubplan->splitFlag, flag); return pSubplan; } -static int32_t splCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SScanLogicNode* pScan, +static int32_t splCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SLogicNode* pSplitNode, ESubplanType subplanType) { SExchangeLogicNode* pExchange = nodesMakeNode(QUERY_NODE_LOGIC_PLAN_EXCHANGE); if (NULL == pExchange) { return TSDB_CODE_OUT_OF_MEMORY; } pExchange->srcGroupId = pCxt->groupId; - pExchange->precision = pScan->pMeta->tableInfo.precision; - pExchange->node.pTargets = nodesCloneList(pScan->node.pTargets); + pExchange->precision = pSplitNode->precision; + pExchange->node.pTargets = nodesCloneList(pSplitNode->pTargets); if (NULL == pExchange->node.pTargets) { return TSDB_CODE_OUT_OF_MEMORY; } - pSubplan->subplanType = SUBPLAN_TYPE_MERGE; + pSubplan->subplanType = subplanType; - if (NULL == pScan->node.pParent) { + if (NULL == pSplitNode->pParent) { pSubplan->pNode = (SLogicNode*)pExchange; return TSDB_CODE_SUCCESS; } SNode* pNode; - FOREACH(pNode, pScan->node.pParent->pChildren) { - if (nodesEqualNode(pNode, pScan)) { + FOREACH(pNode, pSplitNode->pParent->pChildren) { + if (nodesEqualNode(pNode, pSplitNode)) { REPLACE_NODE(pExchange); nodesDestroyNode(pNode); return TSDB_CODE_SUCCESS; @@ -148,33 +150,31 @@ static int32_t stsSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) { if (!splMatch(pCxt, pSubplan, SPLIT_FLAG_STS, (FSplFindSplitNode)stsFindSplitNode, &info)) { return TSDB_CODE_SUCCESS; } - int32_t code = - nodesListMakeStrictAppend(&info.pSubplan->pChildren, splCreateScanSubplan(pCxt, info.pScan, SPLIT_FLAG_STS)); + int32_t code = 
nodesListMakeStrictAppend(&info.pSubplan->pChildren, + splCreateSubplan(pCxt, (SLogicNode*)info.pScan, SPLIT_FLAG_STS)); if (TSDB_CODE_SUCCESS == code) { - code = splCreateExchangeNode(pCxt, info.pSubplan, info.pScan, SUBPLAN_TYPE_MERGE); + code = splCreateExchangeNode(pCxt, info.pSubplan, (SLogicNode*)info.pScan, SUBPLAN_TYPE_MERGE); } ++(pCxt->groupId); pCxt->split = true; return code; } -static bool ctjIsSingleTable(int8_t tableType) { - return (TSDB_CHILD_TABLE == tableType || TSDB_NORMAL_TABLE == tableType); +static bool needSplit(SJoinLogicNode* pJoin) { + if (!pJoin->isSingleTableJoin) { + return false; + } + return QUERY_NODE_LOGIC_PLAN_EXCHANGE != nodeType(nodesListGetNode(pJoin->node.pChildren, 0)) && + QUERY_NODE_LOGIC_PLAN_EXCHANGE != nodeType(nodesListGetNode(pJoin->node.pChildren, 1)); } -static SLogicNode* ctjMatchByNode(SLogicNode* pNode) { - if (QUERY_NODE_LOGIC_PLAN_JOIN == nodeType(pNode)) { - SLogicNode* pLeft = (SLogicNode*)nodesListGetNode(pNode->pChildren, 0); - SLogicNode* pRight = (SLogicNode*)nodesListGetNode(pNode->pChildren, 1); - if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pLeft) && ctjIsSingleTable(((SScanLogicNode*)pLeft)->pMeta->tableType) && - QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pRight) && - ctjIsSingleTable(((SScanLogicNode*)pRight)->pMeta->tableType)) { - return pRight; - } +static SJoinLogicNode* ctjMatchByNode(SLogicNode* pNode) { + if (QUERY_NODE_LOGIC_PLAN_JOIN == nodeType(pNode) && needSplit((SJoinLogicNode*)pNode)) { + return (SJoinLogicNode*)pNode; } SNode* pChild; FOREACH(pChild, pNode->pChildren) { - SLogicNode* pSplitNode = ctjMatchByNode((SLogicNode*)pChild); + SJoinLogicNode* pSplitNode = ctjMatchByNode((SLogicNode*)pChild); if (NULL != pSplitNode) { return pSplitNode; } @@ -183,23 +183,23 @@ static SLogicNode* ctjMatchByNode(SLogicNode* pNode) { } static bool ctjFindSplitNode(SLogicSubplan* pSubplan, SCtjInfo* pInfo) { - SLogicNode* pSplitNode = ctjMatchByNode(pSubplan->pNode); - if (NULL != pSplitNode) { - pInfo->pScan = (SScanLogicNode*)pSplitNode; + SJoinLogicNode* pJoin = ctjMatchByNode(pSubplan->pNode); + if (NULL != pJoin) { + pInfo->pJoin = pJoin; + pInfo->pSplitNode = nodesListGetNode(pJoin->node.pChildren, 1); pInfo->pSubplan = pSubplan; } - return NULL != pSplitNode; + return NULL != pJoin; } static int32_t ctjSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) { SCtjInfo info = {0}; - if (!splMatch(pCxt, pSubplan, SPLIT_FLAG_CTJ, (FSplFindSplitNode)ctjFindSplitNode, &info)) { + if (!splMatch(pCxt, pSubplan, 0, (FSplFindSplitNode)ctjFindSplitNode, &info)) { return TSDB_CODE_SUCCESS; } - int32_t code = - nodesListMakeStrictAppend(&info.pSubplan->pChildren, splCreateScanSubplan(pCxt, info.pScan, SPLIT_FLAG_CTJ)); + int32_t code = nodesListMakeStrictAppend(&info.pSubplan->pChildren, splCreateSubplan(pCxt, info.pSplitNode, 0)); if (TSDB_CODE_SUCCESS == code) { - code = splCreateExchangeNode(pCxt, info.pSubplan, info.pScan, info.pSubplan->subplanType); + code = splCreateExchangeNode(pCxt, info.pSubplan, info.pSplitNode, info.pSubplan->subplanType); } ++(pCxt->groupId); pCxt->split = true; diff --git a/source/libs/planner/src/planner.c b/source/libs/planner/src/planner.c index 8e6c04bb337b900b1ebc8b33c73b29d111231ab3..af62c52a89baa90aaf857fa6606267a437275f87 100644 --- a/source/libs/planner/src/planner.c +++ b/source/libs/planner/src/planner.c @@ -18,6 +18,13 @@ #include "planInt.h" #include "scalar.h" +static void dumpQueryPlan(SQueryPlan* pPlan) { + char* pStr = NULL; + nodesNodeToString(pPlan, false, &pStr, NULL); + planDebugL("Query 
Plan: %s", pStr); + taosMemoryFree(pStr); +} + int32_t qCreateQueryPlan(SPlanContext* pCxt, SQueryPlan** pPlan, SArray* pExecNodeList) { SLogicNode* pLogicNode = NULL; SLogicSubplan* pLogicSubplan = NULL; @@ -36,6 +43,9 @@ int32_t qCreateQueryPlan(SPlanContext* pCxt, SQueryPlan** pPlan, SArray* pExecNo if (TSDB_CODE_SUCCESS == code) { code = createPhysiPlan(pCxt, pLogicPlan, pPlan, pExecNodeList); } + if (TSDB_CODE_SUCCESS == code) { + dumpQueryPlan(*pPlan); + } nodesDestroyNode(pLogicNode); nodesDestroyNode(pLogicSubplan); diff --git a/source/libs/planner/test/CMakeLists.txt b/source/libs/planner/test/CMakeLists.txt index a21b36fef6b3eecc51bdbe4abbb7fff3dc065098..abea60b0c798a055617abf3693be25f365fbc867 100644 --- a/source/libs/planner/test/CMakeLists.txt +++ b/source/libs/planner/test/CMakeLists.txt @@ -32,7 +32,9 @@ if(${BUILD_WINGETOPT}) target_link_libraries(plannerTest PUBLIC wingetopt) endif() -add_test( - NAME plannerTest - COMMAND plannerTest -) +if(NOT TD_WINDOWS) + add_test( + NAME plannerTest + COMMAND plannerTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/libs/planner/test/planBasicTest.cpp b/source/libs/planner/test/planBasicTest.cpp index a17d8cd850d51ed38877b2db1550865408879666..4b84079f7bd417b538a789ff964675208d9ea0bf 100644 --- a/source/libs/planner/test/planBasicTest.cpp +++ b/source/libs/planner/test/planBasicTest.cpp @@ -50,4 +50,6 @@ TEST_F(PlanBasicTest, func) { run("SELECT DIFF(c1) FROM t1"); run("SELECT PERCENTILE(c1, 60) FROM t1"); + + run("SELECT TOP(c1, 60) FROM t1"); } diff --git a/source/libs/planner/test/planGroupByTest.cpp b/source/libs/planner/test/planGroupByTest.cpp index 9ca1001f4cb24224d9dd223e366c9eed83db4fbe..cf516034707e53a230d4e2e7af6fb81c3b8aaecf 100644 --- a/source/libs/planner/test/planGroupByTest.cpp +++ b/source/libs/planner/test/planGroupByTest.cpp @@ -49,6 +49,8 @@ TEST_F(PlanGroupByTest, aggFunc) { run("SELECT LAST(*), FIRST(*) FROM t1"); run("SELECT LAST(*), FIRST(*) FROM t1 GROUP BY c1"); + + run("SELECT SUM(10), COUNT(c1) FROM t1 GROUP BY c2"); } TEST_F(PlanGroupByTest, selectFunc) { diff --git a/source/libs/planner/test/planJoinTest.cpp b/source/libs/planner/test/planJoinTest.cpp index eaedbd1db0036d78084026cf8864ccb977fed80f..a3c5258e33dfb7ccbb6db5bbd600a6efdd01359d 100644 --- a/source/libs/planner/test/planJoinTest.cpp +++ b/source/libs/planner/test/planJoinTest.cpp @@ -44,3 +44,9 @@ TEST_F(PlanJoinTest, withWhere) { run("SELECT t1.c1, t2.c1 FROM st1s1 t1 JOIN st1s2 t2 ON t1.ts = t2.ts " "WHERE t1.c1 > t2.c1 AND t1.c2 = 'abc' AND t2.c2 = 'qwe'"); } + +TEST_F(PlanJoinTest, multiJoin) { + useDb("root", "test"); + + run("SELECT t1.c1, t2.c1 FROM st1s1 t1 JOIN st1s2 t2 ON t1.ts = t2.ts JOIN st1s3 t3 ON t1.ts = t3.ts"); +} diff --git a/source/libs/planner/test/planOptimizeTest.cpp b/source/libs/planner/test/planOptimizeTest.cpp index 77f9b5846c1edbe879c21d90c978a01232c2444e..4234a1320a433da1184cdca6d5f567fa3a2005c6 100644 --- a/source/libs/planner/test/planOptimizeTest.cpp +++ b/source/libs/planner/test/planOptimizeTest.cpp @@ -32,6 +32,12 @@ TEST_F(PlanOptimizeTest, optimizeScanData) { run("SELECT PERCENTILE(c1, 40), COUNT(*) FROM t1"); } +TEST_F(PlanOptimizeTest, ConditionPushDown) { + useDb("root", "test"); + + run("SELECT ts, c1 FROM st1 WHERE tag1 > 4"); +} + TEST_F(PlanOptimizeTest, orderByPrimaryKey) { useDb("root", "test"); diff --git a/source/libs/planner/test/planSTableTest.cpp b/source/libs/planner/test/planSTableTest.cpp index ed75b75e514aede02f41bf29ea044ccf833aef83..d1608cbad1155baf1bda19cf7c06a5121b0d581a 100644 --- 
a/source/libs/planner/test/planSTableTest.cpp +++ b/source/libs/planner/test/planSTableTest.cpp @@ -27,6 +27,14 @@ TEST_F(PlanSuperTableTest, pseudoCol) { run("SELECT TBNAME, tag1, tag2 FROM st1"); } +TEST_F(PlanSuperTableTest, pseudoColOnChildTable) { + useDb("root", "test"); + + run("SELECT TBNAME FROM st1s1"); + + run("SELECT TBNAME, tag1, tag2 FROM st1s1"); +} + TEST_F(PlanSuperTableTest, orderBy) { useDb("root", "test"); diff --git a/source/libs/planner/test/planSubqueryTest.cpp b/source/libs/planner/test/planSubqueryTest.cpp index 2d559c6f3b5322e4bd27bd571fc5e6829ccf262c..f82e10e9983004204544ecd16632bd2a59a37623 100644 --- a/source/libs/planner/test/planSubqueryTest.cpp +++ b/source/libs/planner/test/planSubqueryTest.cpp @@ -26,6 +26,8 @@ TEST_F(PlanSubqeuryTest, basic) { run("SELECT * FROM (SELECT * FROM t1)"); run("SELECT LAST(c1) FROM (SELECT * FROM t1)"); + + run("SELECT c1 FROM (SELECT TODAY() AS c1 FROM t1)"); } TEST_F(PlanSubqeuryTest, doubleGroupBy) { diff --git a/source/libs/planner/test/planTestMain.cpp b/source/libs/planner/test/planTestMain.cpp index 0373ab38d302c5dbc92a9341993b2fb47f7b957f..42c8558239b4389e0a0c469fdaa5842b53dc2398 100644 --- a/source/libs/planner/test/planTestMain.cpp +++ b/source/libs/planner/test/planTestMain.cpp @@ -25,7 +25,7 @@ class PlannerEnv : public testing::Environment { virtual void SetUp() { initMetaDataEnv(); generateMetaData(); - initLog("/tmp/td"); + initLog(TD_TMP_DIR_PATH "td"); } virtual void TearDown() { destroyMetaDataEnv(); } diff --git a/source/libs/planner/test/planTestUtil.cpp b/source/libs/planner/test/planTestUtil.cpp index 6e184fec724ceffbaa186367956d1318c59d2fb9..e2082d49364727719bc72f3445bcb038d5584976 100644 --- a/source/libs/planner/test/planTestUtil.cpp +++ b/source/libs/planner/test/planTestUtil.cpp @@ -14,6 +14,7 @@ */ #include "planTestUtil.h" +#include #include #include @@ -72,7 +73,7 @@ void setDumpModule(const char* pModule) { } } -void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(optarg); } +void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(pNum); } void setLogLevel(const char* pLogLevel) { g_logLevel = stoi(pLogLevel); } @@ -232,45 +233,45 @@ class PlannerTestBaseImpl { if (DUMP_MODULE_ALL == module || DUMP_MODULE_PARSER == module) { if (res_.prepareAst_.empty()) { - cout << "syntax tree : " << endl; + cout << "+++++++++++++++++++++syntax tree : " << endl; cout << res_.ast_ << endl; } else { - cout << "prepare syntax tree : " << endl; + cout << "+++++++++++++++++++++prepare syntax tree : " << endl; cout << res_.prepareAst_ << endl; - cout << "bound syntax tree : " << endl; + cout << "+++++++++++++++++++++bound syntax tree : " << endl; cout << res_.boundAst_ << endl; - cout << "syntax tree : " << endl; + cout << "+++++++++++++++++++++syntax tree : " << endl; cout << res_.ast_ << endl; } } if (DUMP_MODULE_ALL == module || DUMP_MODULE_LOGIC == module) { - cout << "raw logic plan : " << endl; + cout << "+++++++++++++++++++++raw logic plan : " << endl; cout << res_.rawLogicPlan_ << endl; } if (DUMP_MODULE_ALL == module || DUMP_MODULE_OPTIMIZED == module) { - cout << "optimized logic plan : " << endl; + cout << "+++++++++++++++++++++optimized logic plan : " << endl; cout << res_.optimizedLogicPlan_ << endl; } if (DUMP_MODULE_ALL == module || DUMP_MODULE_SPLIT == module) { - cout << "split logic plan : " << endl; + cout << "+++++++++++++++++++++split logic plan : " << endl; cout << res_.splitLogicPlan_ << endl; } if (DUMP_MODULE_ALL == module || DUMP_MODULE_SCALED == module) { - cout << "scaled logic plan : " 
<< endl; + cout << "+++++++++++++++++++++scaled logic plan : " << endl; cout << res_.scaledLogicPlan_ << endl; } if (DUMP_MODULE_ALL == module || DUMP_MODULE_PHYSICAL == module) { - cout << "physical plan : " << endl; + cout << "+++++++++++++++++++++physical plan : " << endl; cout << res_.physiPlan_ << endl; } if (DUMP_MODULE_ALL == module || DUMP_MODULE_SUBPLAN == module) { - cout << "physical subplan : " << endl; + cout << "+++++++++++++++++++++physical subplan : " << endl; for (const auto& subplan : res_.physiSubplans_) { cout << subplan << endl; } diff --git a/source/libs/qcom/src/querymsg.c b/source/libs/qcom/src/querymsg.c index fb9319bedeabfbd3673c72dda58ca0c3686cd940..636b2b50a83cc300b59ef97fb7f09c09808fb717 100644 --- a/source/libs/qcom/src/querymsg.c +++ b/source/libs/qcom/src/querymsg.c @@ -22,7 +22,7 @@ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wformat-truncation" -int32_t (*queryBuildMsg[TDMT_MAX])(void *input, char **msg, int32_t msgSize, int32_t *msgLen) = {0}; +int32_t (*queryBuildMsg[TDMT_MAX])(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallocFp)(int32_t)) = {0}; int32_t (*queryProcessMsgRsp[TDMT_MAX])(void *output, char *msg, int32_t msgSize) = {0}; int32_t queryBuildUseDbOutput(SUseDbOutput *pOut, SUseDbRsp *usedbRsp) { @@ -58,7 +58,7 @@ int32_t queryBuildUseDbOutput(SUseDbOutput *pOut, SUseDbRsp *usedbRsp) { return TSDB_CODE_SUCCESS; } -int32_t queryBuildTableMetaReqMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen) { +int32_t queryBuildTableMetaReqMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int32_t)) { SBuildTableMetaInput *pInput = input; if (NULL == input || NULL == msg || NULL == msgLen) { return TSDB_CODE_TSC_INVALID_INPUT; @@ -72,7 +72,7 @@ int32_t queryBuildTableMetaReqMsg(void *input, char **msg, int32_t msgSize, int3 tstrncpy(infoReq.tbName, pInput->tbName, TSDB_TABLE_NAME_LEN); int32_t bufLen = tSerializeSTableInfoReq(NULL, 0, &infoReq); - void *pBuf = rpcMallocCont(bufLen); + void *pBuf = (*mallcFp)(bufLen); tSerializeSTableInfoReq(pBuf, bufLen, &infoReq); *msg = pBuf; @@ -81,7 +81,7 @@ int32_t queryBuildTableMetaReqMsg(void *input, char **msg, int32_t msgSize, int3 return TSDB_CODE_SUCCESS; } -int32_t queryBuildUseDbMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen) { +int32_t queryBuildUseDbMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int32_t)) { SBuildUseDBInput *pInput = input; if (NULL == pInput || NULL == msg || NULL == msgLen) { return TSDB_CODE_TSC_INVALID_INPUT; @@ -95,7 +95,7 @@ int32_t queryBuildUseDbMsg(void *input, char **msg, int32_t msgSize, int32_t *ms usedbReq.numOfTable = pInput->numOfTable; int32_t bufLen = tSerializeSUseDbReq(NULL, 0, &usedbReq); - void *pBuf = rpcMallocCont(bufLen); + void *pBuf = (*mallcFp)(bufLen); tSerializeSUseDbReq(pBuf, bufLen, &usedbReq); *msg = pBuf; @@ -104,7 +104,7 @@ int32_t queryBuildUseDbMsg(void *input, char **msg, int32_t msgSize, int32_t *ms return TSDB_CODE_SUCCESS; } -int32_t queryBuildQnodeListMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen) { +int32_t queryBuildQnodeListMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int32_t)) { if (NULL == msg || NULL == msgLen) { return TSDB_CODE_TSC_INVALID_INPUT; } @@ -113,7 +113,7 @@ int32_t queryBuildQnodeListMsg(void *input, char **msg, int32_t msgSize, int32_t qnodeListReq.rowNum = -1; int32_t bufLen = tSerializeSQnodeListReq(NULL, 0, &qnodeListReq); - void *pBuf = 
rpcMallocCont(bufLen); + void *pBuf = (*mallcFp)(bufLen); tSerializeSQnodeListReq(pBuf, bufLen, &qnodeListReq); *msg = pBuf; @@ -122,7 +122,7 @@ int32_t queryBuildQnodeListMsg(void *input, char **msg, int32_t msgSize, int32_t return TSDB_CODE_SUCCESS; } -int32_t queryBuildGetDBCfgMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen) { +int32_t queryBuildGetDBCfgMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int32_t)) { if (NULL == msg || NULL == msgLen) { return TSDB_CODE_TSC_INVALID_INPUT; } @@ -131,7 +131,7 @@ int32_t queryBuildGetDBCfgMsg(void *input, char **msg, int32_t msgSize, int32_t strcpy(dbCfgReq.db, input); int32_t bufLen = tSerializeSDbCfgReq(NULL, 0, &dbCfgReq); - void *pBuf = rpcMallocCont(bufLen); + void *pBuf = (*mallcFp)(bufLen); tSerializeSDbCfgReq(pBuf, bufLen, &dbCfgReq); *msg = pBuf; @@ -140,7 +140,7 @@ int32_t queryBuildGetDBCfgMsg(void *input, char **msg, int32_t msgSize, int32_t return TSDB_CODE_SUCCESS; } -int32_t queryBuildGetIndexMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen) { +int32_t queryBuildGetIndexMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int32_t)) { if (NULL == msg || NULL == msgLen) { return TSDB_CODE_TSC_INVALID_INPUT; } @@ -149,7 +149,7 @@ int32_t queryBuildGetIndexMsg(void *input, char **msg, int32_t msgSize, int32_t strcpy(indexReq.indexFName, input); int32_t bufLen = tSerializeSUserIndexReq(NULL, 0, &indexReq); - void *pBuf = rpcMallocCont(bufLen); + void *pBuf = (*mallcFp)(bufLen); tSerializeSUserIndexReq(pBuf, bufLen, &indexReq); *msg = pBuf; @@ -158,7 +158,7 @@ int32_t queryBuildGetIndexMsg(void *input, char **msg, int32_t msgSize, int32_t return TSDB_CODE_SUCCESS; } -int32_t queryBuildRetrieveFuncMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen) { +int32_t queryBuildRetrieveFuncMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int32_t)) { if (NULL == msg || NULL == msgLen) { return TSDB_CODE_TSC_INVALID_INPUT; } @@ -170,7 +170,7 @@ int32_t queryBuildRetrieveFuncMsg(void *input, char **msg, int32_t msgSize, int3 taosArrayPush(funcReq.pFuncNames, input); int32_t bufLen = tSerializeSRetrieveFuncReq(NULL, 0, &funcReq); - void *pBuf = rpcMallocCont(bufLen); + void *pBuf = (*mallcFp)(bufLen); tSerializeSRetrieveFuncReq(pBuf, bufLen, &funcReq); taosArrayDestroy(funcReq.pFuncNames); @@ -181,7 +181,7 @@ int32_t queryBuildRetrieveFuncMsg(void *input, char **msg, int32_t msgSize, int3 return TSDB_CODE_SUCCESS; } -int32_t queryBuildGetUserAuthMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen) { +int32_t queryBuildGetUserAuthMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int32_t)) { if (NULL == msg || NULL == msgLen) { return TSDB_CODE_TSC_INVALID_INPUT; } @@ -190,7 +190,7 @@ int32_t queryBuildGetUserAuthMsg(void *input, char **msg, int32_t msgSize, int32 strncpy(req.user, input, sizeof(req.user)); int32_t bufLen = tSerializeSGetUserAuthReq(NULL, 0, &req); - void *pBuf = rpcMallocCont(bufLen); + void *pBuf = (*mallcFp)(bufLen); tSerializeSGetUserAuthReq(pBuf, bufLen, &req); *msg = pBuf; diff --git a/source/libs/qworker/inc/qworkerInt.h b/source/libs/qworker/inc/qwInt.h similarity index 88% rename from source/libs/qworker/inc/qworkerInt.h rename to source/libs/qworker/inc/qwInt.h index 511327658f14a58e25460f979a4ebb197c8d4b8c..b0a102069dc7d00e3002d14c76ec9c65f0854d92 100644 --- a/source/libs/qworker/inc/qworkerInt.h +++ b/source/libs/qworker/inc/qwInt.h @@ -26,7 +26,7 @@ extern "C" 
{ #include "ttimer.h" #include "tref.h" #include "plannodes.h" - +#include "executor.h" #include "trpc.h" #define QW_DEFAULT_SCHEDULER_NUMBER 10000 @@ -76,6 +76,8 @@ typedef struct SQWDebug { bool dumpEnable; } SQWDebug; +extern SQWDebug gQWDebug; + typedef struct SQWMsg { void *node; int32_t code; @@ -143,6 +145,15 @@ typedef struct SQWSchStatus { SHashObj *tasksHash; // key:queryId+taskId, value: SQWTaskStatus } SQWSchStatus; +typedef struct SQWWaitTimeStat { + uint64_t num; + uint64_t total; +} SQWWaitTimeStat; + +typedef struct SQWStat { + SQWWaitTimeStat msgWait[2]; +} SQWStat; + // Qnode/Vnode level task management typedef struct SQWorker { int64_t refId; @@ -153,9 +164,10 @@ typedef struct SQWorker { tmr_h hbTimer; SRWLatch schLock; // SRWLatch ctxLock; - SHashObj *schHash; // key: schedulerId, value: SQWSchStatus - SHashObj *ctxHash; // key: queryId+taskId, value: SQWTaskCtx - SMsgCb msgCb; + SHashObj *schHash; // key: schedulerId, value: SQWSchStatus + SHashObj *ctxHash; // key: queryId+taskId, value: SQWTaskCtx + SMsgCb msgCb; + SQWStat stat; } SQWorker; typedef struct SQWorkerMgmt { @@ -227,6 +239,7 @@ typedef struct SQWorkerMgmt { #define QW_ELOG(_param, ...) qError("QW:%p " _param, mgmt, __VA_ARGS__) #define QW_DLOG(_param, ...) qDebug("QW:%p " _param, mgmt, __VA_ARGS__) +#define QW_TLOG(_param, ...) qTrace("QW:%p " _param, mgmt, __VA_ARGS__) #define QW_DUMP(_param, ...) \ do { \ @@ -302,9 +315,29 @@ typedef struct SQWorkerMgmt { extern SQWorkerMgmt gQwMgmt; static FORCE_INLINE SQWorker *qwAcquire(int64_t refId) { return (SQWorker *)taosAcquireRef(atomic_load_32(&gQwMgmt.qwRef), refId); } - static FORCE_INLINE int32_t qwRelease(int64_t refId) { return taosReleaseRef(gQwMgmt.qwRef, refId); } +char *qwPhaseStr(int32_t phase); +char *qwBufStatusStr(int32_t bufStatus); +int32_t qwAcquireAddScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch); +void qwReleaseScheduler(int32_t rwType, SQWorker *mgmt); +int32_t qwAddTaskStatus(QW_FPARAMS_DEF, int32_t status); +int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx); +int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx); +int32_t qwAddAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx); +void qwReleaseTaskCtx(SQWorker *mgmt, void *ctx); +int32_t qwKillTaskHandle(QW_FPARAMS_DEF, SQWTaskCtx *ctx); +int32_t qwUpdateTaskStatus(QW_FPARAMS_DEF, int8_t status); +int32_t qwDropTask(QW_FPARAMS_DEF); +void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx); +int32_t qwOpenRef(void); +void qwSetHbParam(int64_t refId, SQWHbParam **pParam); +int32_t qwUpdateWaitTimeInQueue(SQWorker *mgmt, int64_t ts, EQueueType type); +int64_t qwGetWaitTimeInQueue(SQWorker *mgmt, EQueueType type); + +void qwDbgDumpMgmtInfo(SQWorker *mgmt); +int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, bool *ignore); + #ifdef __cplusplus } diff --git a/source/libs/qworker/inc/qworkerMsg.h b/source/libs/qworker/inc/qwMsg.h similarity index 92% rename from source/libs/qworker/inc/qworkerMsg.h rename to source/libs/qworker/inc/qwMsg.h index 6453cff70095b246f0ede7034da07536b1075f2f..ede085b6f912842c85dce8597374613856d80f1f 100644 --- a/source/libs/qworker/inc/qworkerMsg.h +++ b/source/libs/qworker/inc/qwMsg.h @@ -20,7 +20,7 @@ extern "C" { #endif -#include "qworkerInt.h" +#include "qwInt.h" #include "dataSinkMgt.h" int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, int8_t taskType, int8_t explain); @@ -36,12 +36,10 @@ int32_t qwBuildAndSendFetchRsp(SRpcHandleInfo *pConn, SRetrieveTableRsp *pRsp, i int32_t code); 
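The new SQWWaitTimeStat and SQWStat members give every SQWorker one num/total counter pair per queue, with msgWait[0] charged for the query queue and msgWait[1] for the fetch queue (see qwUpdateWaitTimeInQueue later in this patch), so an average wait can be derived as total/num. A reduced sketch of the bookkeeping, with Demo* names standing in for the real structs:

```c
#include <stdint.h>

typedef struct DemoWaitStat {
  uint64_t num;    /* messages seen                    */
  uint64_t total;  /* accumulated wait in microseconds */
} DemoWaitStat;

typedef struct DemoStat {
  DemoWaitStat msgWait[2];  /* [0] query queue, [1] fetch queue */
} DemoStat;

static void demoRecordWait(DemoStat *stat, int queueIdx, uint64_t waitUs) {
  ++stat->msgWait[queueIdx].num;
  stat->msgWait[queueIdx].total += waitUs;
}

static uint64_t demoAvgWait(const DemoStat *stat, int queueIdx) {
  const DemoWaitStat *w = &stat->msgWait[queueIdx];
  return w->num ? w->total / w->num : 0;  /* guard the empty-queue case */
}
```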
void qwBuildFetchRsp(void *msg, SOutputData *input, int32_t len, bool qComplete); int32_t qwBuildAndSendCQueryMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn); -int32_t qwBuildAndSendReadyRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo* tbInfo); -int32_t qwBuildAndSendQueryRsp(SRpcHandleInfo *pConn, int32_t code); +int32_t qwBuildAndSendQueryRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo* tbInfo); int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SExplainExecInfo *execInfo, int32_t num); void qwFreeFetchRsp(void *msg); int32_t qwMallocFetchRsp(int32_t length, SRetrieveTableRsp **rsp); -int32_t qwGetSchTasksStatus(SQWorker *mgmt, uint64_t sId, SSchedulerStatusRsp **rsp); int32_t qwBuildAndSendHbRsp(SRpcHandleInfo *pConn, SSchedulerHbRsp *rsp, int32_t code); int32_t qwRegisterQueryBrokenLinkArg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn); int32_t qwRegisterHbBrokenLinkArg(SQWorker *mgmt, uint64_t sId, SRpcHandleInfo *pConn); diff --git a/source/libs/qworker/src/qwDbg.c b/source/libs/qworker/src/qwDbg.c new file mode 100644 index 0000000000000000000000000000000000000000..27fe22295d3706eb21a237f8d662e34b4dce9b36 --- /dev/null +++ b/source/libs/qworker/src/qwDbg.c @@ -0,0 +1,128 @@ +#include "qworker.h" +#include "dataSinkMgt.h" +#include "executor.h" +#include "planner.h" +#include "query.h" +#include "qwInt.h" +#include "qwMsg.h" +#include "tcommon.h" +#include "tmsg.h" +#include "tname.h" + +SQWDebug gQWDebug = {.statusEnable = true, .dumpEnable = true}; + +int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, bool *ignore) { + if (!gQWDebug.statusEnable) { + return TSDB_CODE_SUCCESS; + } + + int32_t code = 0; + + if (oriStatus == newStatus) { + if (newStatus == JOB_TASK_STATUS_EXECUTING || newStatus == JOB_TASK_STATUS_FAILED) { + *ignore = true; + return TSDB_CODE_SUCCESS; + } + + QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); + } + + switch (oriStatus) { + case JOB_TASK_STATUS_NULL: + if (newStatus != JOB_TASK_STATUS_EXECUTING && newStatus != JOB_TASK_STATUS_FAILED && + newStatus != JOB_TASK_STATUS_NOT_START) { + QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); + } + + break; + case JOB_TASK_STATUS_NOT_START: + if (newStatus != JOB_TASK_STATUS_CANCELLED) { + QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); + } + + break; + case JOB_TASK_STATUS_EXECUTING: + if (newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED && newStatus != JOB_TASK_STATUS_SUCCEED && + newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_CANCELLING && + newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING) { + QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); + } + + break; + case JOB_TASK_STATUS_PARTIAL_SUCCEED: + if (newStatus != JOB_TASK_STATUS_EXECUTING && newStatus != JOB_TASK_STATUS_SUCCEED && + newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_FAILED && + newStatus != JOB_TASK_STATUS_DROPPING) { + QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); + } + + break; + case JOB_TASK_STATUS_SUCCEED: + if (newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING && + newStatus != JOB_TASK_STATUS_FAILED) { + QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); + } + + break; + case JOB_TASK_STATUS_FAILED: + if (newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING) { + QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); + } + break; + + case JOB_TASK_STATUS_CANCELLING: + if (newStatus != JOB_TASK_STATUS_CANCELLED) { + QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); + } + + break; + case JOB_TASK_STATUS_CANCELLED: + case JOB_TASK_STATUS_DROPPING: + if (newStatus != 
JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) { + QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); + } + break; + + default: + QW_TASK_ELOG("invalid task origStatus:%s", jobTaskStatusStr(oriStatus)); + return TSDB_CODE_QRY_APP_ERROR; + } + + return TSDB_CODE_SUCCESS; + +_return: + + QW_TASK_ELOG("invalid task status update from %s to %s", jobTaskStatusStr(oriStatus), jobTaskStatusStr(newStatus)); + QW_RET(code); +} + +void qwDbgDumpSchInfo(SQWSchStatus *sch, int32_t i) {} + +void qwDbgDumpMgmtInfo(SQWorker *mgmt) { + if (!gQWDebug.dumpEnable) { + return; + } + + QW_LOCK(QW_READ, &mgmt->schLock); + + /*QW_DUMP("total remain schduler num:%d", taosHashGetSize(mgmt->schHash));*/ + + void *key = NULL; + size_t keyLen = 0; + int32_t i = 0; + SQWSchStatus *sch = NULL; + + void *pIter = taosHashIterate(mgmt->schHash, NULL); + while (pIter) { + sch = (SQWSchStatus *)pIter; + qwDbgDumpSchInfo(sch, i); + ++i; + pIter = taosHashIterate(mgmt->schHash, pIter); + } + + QW_UNLOCK(QW_READ, &mgmt->schLock); + + /*QW_DUMP("total remain ctx num:%d", taosHashGetSize(mgmt->ctxHash));*/ +} + + diff --git a/source/libs/qworker/src/qworkerMsg.c b/source/libs/qworker/src/qwMsg.c similarity index 72% rename from source/libs/qworker/src/qworkerMsg.c rename to source/libs/qworker/src/qwMsg.c index 0a192eb795b689285831f366aff30af4a3743b27..b9dc18cd2fd22ff196a300451d1d39b5bcd2353d 100644 --- a/source/libs/qworker/src/qworkerMsg.c +++ b/source/libs/qworker/src/qwMsg.c @@ -1,10 +1,10 @@ -#include "qworkerMsg.h" +#include "qwMsg.h" #include "dataSinkMgt.h" #include "executor.h" #include "planner.h" #include "query.h" #include "qworker.h" -#include "qworkerInt.h" +#include "qwInt.h" #include "tcommon.h" #include "tmsg.h" #include "tname.h" @@ -43,28 +43,8 @@ void qwFreeFetchRsp(void *msg) { } } -int32_t qwBuildAndSendQueryRsp(SRpcHandleInfo *pConn, int32_t code) { - SQueryTableRsp rsp = {.code = code}; - - int32_t contLen = tSerializeSQueryTableRsp(NULL, 0, &rsp); - void * msg = rpcMallocCont(contLen); - tSerializeSQueryTableRsp(msg, contLen, &rsp); - - SRpcMsg rpcRsp = { - .msgType = TDMT_VND_QUERY_RSP, - .pCont = msg, - .contLen = contLen, - .code = code, - .info = *pConn, - }; - - tmsgSendRsp(&rpcRsp); - - return TSDB_CODE_SUCCESS; -} - -int32_t qwBuildAndSendReadyRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo* tbInfo) { - SResReadyRsp *pRsp = (SResReadyRsp *)rpcMallocCont(sizeof(SResReadyRsp)); +int32_t qwBuildAndSendQueryRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo* tbInfo) { + SQueryTableRsp *pRsp = (SQueryTableRsp *)rpcMallocCont(sizeof(SQueryTableRsp)); pRsp->code = code; if (tbInfo) { strcpy(pRsp->tbFName, tbInfo->tbFName); @@ -73,13 +53,12 @@ int32_t qwBuildAndSendReadyRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo* } SRpcMsg rpcRsp = { - .msgType = TDMT_VND_RES_READY_RSP, + .msgType = TDMT_VND_QUERY_RSP, .pCont = pRsp, .contLen = sizeof(*pRsp), .code = code, .info = *pConn, }; - rpcRsp.info.ahandle = NULL; tmsgSendRsp(&rpcRsp); @@ -177,76 +156,6 @@ int32_t qwBuildAndSendDropRsp(SRpcHandleInfo *pConn, int32_t code) { return TSDB_CODE_SUCCESS; } -int32_t qwBuildAndSendShowRsp(SRpcMsg *pMsg, int32_t code) { - int32_t numOfCols = 6; - SVShowTablesRsp showRsp = {0}; - - // showRsp.showId = 1; - showRsp.tableMeta.pSchemas = taosMemoryCalloc(numOfCols, sizeof(SSchema)); - if (showRsp.tableMeta.pSchemas == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; - } - - col_id_t cols = 0; - SSchema *pSchema = showRsp.tableMeta.pSchemas; - - const SSchema *s = tGetTbnameColumnSchema(); - 
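qwDbgValidateStatus above is a hand-written state machine over the task status values. The same rules can be read as a transition table; this sketch encodes them as bitmasks (the ST_* values are illustrative stand-ins for the real JOB_TASK_STATUS_* codes, and the diff additionally treats a repeated EXECUTING or FAILED as an ignorable no-op rather than an error):

```c
#include <stdbool.h>
#include <stdint.h>

enum { ST_NULL, ST_NOT_START, ST_EXECUTING, ST_PARTIAL, ST_SUCCEED,
       ST_FAILED, ST_CANCELLING, ST_CANCELLED, ST_DROPPING, ST_MAX };

#define BIT(s) (1u << (s))

/* Allowed transitions, transcribed from the validator above. */
static const uint32_t allowed[ST_MAX] = {
  [ST_NULL]       = BIT(ST_EXECUTING) | BIT(ST_FAILED) | BIT(ST_NOT_START),
  [ST_NOT_START]  = BIT(ST_CANCELLED),
  [ST_EXECUTING]  = BIT(ST_PARTIAL) | BIT(ST_SUCCEED) | BIT(ST_FAILED) |
                    BIT(ST_CANCELLING) | BIT(ST_CANCELLED) | BIT(ST_DROPPING),
  [ST_PARTIAL]    = BIT(ST_EXECUTING) | BIT(ST_SUCCEED) | BIT(ST_CANCELLED) |
                    BIT(ST_FAILED) | BIT(ST_DROPPING),
  [ST_SUCCEED]    = BIT(ST_CANCELLED) | BIT(ST_DROPPING) | BIT(ST_FAILED),
  [ST_FAILED]     = BIT(ST_CANCELLED) | BIT(ST_DROPPING),
  [ST_CANCELLING] = BIT(ST_CANCELLED),
  [ST_CANCELLED]  = BIT(ST_FAILED) | BIT(ST_PARTIAL),
  [ST_DROPPING]   = BIT(ST_FAILED) | BIT(ST_PARTIAL),
};

static bool transitionAllowed(int from, int to) {
  if (from < 0 || from >= ST_MAX || to < 0 || to >= ST_MAX) return false;
  return (allowed[from] & BIT(to)) != 0;
}
```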
*pSchema = createSchema(s->type, s->bytes, ++cols, "name"); - pSchema++; - - int32_t type = TSDB_DATA_TYPE_TIMESTAMP; - *pSchema = createSchema(type, tDataTypes[type].bytes, ++cols, "created"); - pSchema++; - - type = TSDB_DATA_TYPE_SMALLINT; - *pSchema = createSchema(type, tDataTypes[type].bytes, ++cols, "columns"); - pSchema++; - - *pSchema = createSchema(s->type, s->bytes, ++cols, "stable"); - pSchema++; - - type = TSDB_DATA_TYPE_BIGINT; - *pSchema = createSchema(type, tDataTypes[type].bytes, ++cols, "uid"); - pSchema++; - - type = TSDB_DATA_TYPE_INT; - *pSchema = createSchema(type, tDataTypes[type].bytes, ++cols, "vgId"); - - assert(cols == numOfCols); - showRsp.tableMeta.numOfColumns = cols; - - int32_t bufLen = tSerializeSShowRsp(NULL, 0, &showRsp); - void * pBuf = rpcMallocCont(bufLen); - tSerializeSShowRsp(pBuf, bufLen, &showRsp); - - SRpcMsg rpcMsg = { - .info = pMsg->info, - .pCont = pBuf, - .contLen = bufLen, - .code = code, - }; - - tmsgSendRsp(&rpcMsg); - return TSDB_CODE_SUCCESS; -} - -int32_t qwBuildAndSendShowFetchRsp(SRpcMsg *pMsg, SVShowTablesFetchReq *pFetchReq) { - SVShowTablesFetchRsp *pRsp = (SVShowTablesFetchRsp *)rpcMallocCont(sizeof(SVShowTablesFetchRsp)); - int32_t handle = htonl(pFetchReq->id); - - pRsp->numOfRows = 0; - SRpcMsg rpcMsg = { - .info = pMsg->info, - .pCont = pRsp, - .contLen = sizeof(*pRsp), - .code = 0, - }; - - tmsgSendRsp(&rpcMsg); - return TSDB_CODE_SUCCESS; -} - int32_t qwBuildAndSendCQueryMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn) { SQueryContinueReq *req = (SQueryContinueReq *)rpcMallocCont(sizeof(SQueryContinueReq)); if (NULL == req) { @@ -339,7 +248,7 @@ int32_t qwRegisterHbBrokenLinkArg(SQWorker *mgmt, uint64_t sId, SRpcHandleInfo * return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); } @@ -348,6 +257,8 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { SSubQueryMsg *msg = pMsg->pCont; SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateWaitTimeInQueue(mgmt, ts, QUERY_QUEUE); + if (NULL == msg || pMsg->contLen <= sizeof(*msg)) { QW_ELOG("invalid query msg, msg:%p, msgLen:%d", msg, pMsg->contLen); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -377,7 +288,7 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { int32_t code = 0; int8_t status = 0; bool queryDone = false; @@ -386,6 +297,8 @@ int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { SQWTaskCtx * handles = NULL; SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateWaitTimeInQueue(mgmt, ts, QUERY_QUEUE); + if (NULL == msg || pMsg->contLen < sizeof(*msg)) { QW_ELOG("invalid cquery msg, msg:%p, msgLen:%d", msg, pMsg->contLen); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -407,66 +320,7 @@ int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessReadyMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { - if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { - return TSDB_CODE_QRY_INVALID_INPUT; - } - - SQWorker * mgmt = (SQWorker *)qWorkerMgmt; - SResReadyReq *msg = pMsg->pCont; - if 
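qWorkerProcessQueryMsg and qWorkerProcessCQueryMsg now take the message's enqueue timestamp so qwUpdateWaitTimeInQueue can charge (now - ts) to the query queue. A sketch of the producing side, assuming a microsecond clock equivalent to taosGetTimestampUs(); QueuedMsg and enqueueMsg are invented for illustration:

```c
#include <stdint.h>
#include <sys/time.h>

typedef struct QueuedMsg {
  void   *pCont;
  int64_t enqueueTs;  /* microseconds, recorded once at enqueue time */
} QueuedMsg;

static int64_t nowUs(void) {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
}

static void enqueueMsg(QueuedMsg *m, void *pCont) {
  m->pCont     = pCont;
  m->enqueueTs = nowUs();
}

/* Later, in the consumer thread:
 *   qWorkerProcessQueryMsg(node, mgmt, &rpcMsg, m->enqueueTs);
 * which internally computes duration = taosGetTimestampUs() - ts. */
```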
(NULL == msg || pMsg->contLen < sizeof(*msg)) { - QW_ELOG("invalid task ready msg, msg:%p, msgLen:%d", msg, pMsg->contLen); - QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); - } - - msg->sId = be64toh(msg->sId); - msg->queryId = be64toh(msg->queryId); - msg->taskId = be64toh(msg->taskId); - - uint64_t sId = msg->sId; - uint64_t qId = msg->queryId; - uint64_t tId = msg->taskId; - int64_t rId = 0; - - SQWMsg qwMsg = {.node = node, .msg = NULL, .msgLen = 0, .connInfo = pMsg->info}; - - QW_SCH_TASK_DLOG("processReady start, node:%p, handle:%p", node, pMsg->info.handle); - - QW_ERR_RET(qwProcessReady(QW_FPARAMS(), &qwMsg)); - - QW_SCH_TASK_DLOG("processReady end, node:%p", node); - - return TSDB_CODE_SUCCESS; -} - -int32_t qWorkerProcessStatusMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { - if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { - return TSDB_CODE_QRY_INVALID_INPUT; - } - - int32_t code = 0; - SSchTasksStatusReq *msg = pMsg->pCont; - if (NULL == msg || pMsg->contLen < sizeof(*msg)) { - qError("invalid task status msg"); - QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); - } - - SQWorker *mgmt = (SQWorker *)qWorkerMgmt; - msg->sId = htobe64(msg->sId); - uint64_t sId = msg->sId; - - SSchedulerStatusRsp *sStatus = NULL; - - // QW_ERR_JRET(qwGetSchTasksStatus(qWorkerMgmt, msg->sId, &sStatus)); - -_return: - - // QW_ERR_RET(qwBuildAndSendStatusRsp(pMsg, sStatus)); - - return TSDB_CODE_SUCCESS; -} - -int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { return TSDB_CODE_QRY_INVALID_INPUT; } @@ -474,6 +328,8 @@ int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { SResFetchReq *msg = pMsg->pCont; SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE); + if (NULL == msg || pMsg->contLen < sizeof(*msg)) { QW_ELOG("invalid fetch msg, msg:%p, msgLen:%d", msg, pMsg->contLen); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -499,13 +355,16 @@ int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { + SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE); + qProcessFetchRsp(NULL, pMsg, NULL); pMsg->pCont = NULL; return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { return TSDB_CODE_QRY_INVALID_INPUT; } @@ -513,6 +372,9 @@ int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { SQWorker * mgmt = (SQWorker *)qWorkerMgmt; int32_t code = 0; STaskCancelReq *msg = pMsg->pCont; + + qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE); + if (NULL == msg || pMsg->contLen < sizeof(*msg)) { qError("invalid task cancel msg"); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -540,7 +402,7 @@ _return: return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { return TSDB_CODE_QRY_INVALID_INPUT; } @@ -549,6 +411,8 @@ 
int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { STaskDropReq *msg = pMsg->pCont; SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE); + if (NULL == msg || pMsg->contLen < sizeof(*msg)) { QW_ELOG("invalid task drop msg, msg:%p, msgLen:%d", msg, pMsg->contLen); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -579,7 +443,7 @@ int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { return TSDB_CODE_QRY_INVALID_INPUT; } @@ -588,6 +452,8 @@ int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { SSchedulerHbReq req = {0}; SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE); + if (NULL == pMsg->pCont) { QW_ELOG("invalid hb msg, msg:%p, msgLen:%d", pMsg->pCont, pMsg->contLen); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -613,22 +479,3 @@ int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { return TSDB_CODE_SUCCESS; } - -int32_t qWorkerProcessShowMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { - if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { - return TSDB_CODE_QRY_INVALID_INPUT; - } - - int32_t code = 0; - SVShowTablesReq *pReq = pMsg->pCont; - QW_RET(qwBuildAndSendShowRsp(pMsg, code)); -} - -int32_t qWorkerProcessShowFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { - if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { - return TSDB_CODE_QRY_INVALID_INPUT; - } - - SVShowTablesFetchReq *pFetchReq = pMsg->pCont; - QW_RET(qwBuildAndSendShowFetchRsp(pMsg, pFetchReq)); -} diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c new file mode 100644 index 0000000000000000000000000000000000000000..a4bc22fc88121de7d51e3e67655468046e95c3bf --- /dev/null +++ b/source/libs/qworker/src/qwUtil.c @@ -0,0 +1,541 @@ +#include "qworker.h" +#include "dataSinkMgt.h" +#include "executor.h" +#include "planner.h" +#include "query.h" +#include "qwInt.h" +#include "qwMsg.h" +#include "tcommon.h" +#include "tmsg.h" +#include "tname.h" + +char *qwPhaseStr(int32_t phase) { + switch (phase) { + case QW_PHASE_PRE_QUERY: + return "PRE_QUERY"; + case QW_PHASE_POST_QUERY: + return "POST_QUERY"; + case QW_PHASE_PRE_FETCH: + return "PRE_FETCH"; + case QW_PHASE_POST_FETCH: + return "POST_FETCH"; + case QW_PHASE_PRE_CQUERY: + return "PRE_CQUERY"; + case QW_PHASE_POST_CQUERY: + return "POST_CQUERY"; + default: + break; + } + + return "UNKNOWN"; +} + +char *qwBufStatusStr(int32_t bufStatus) { + switch (bufStatus) { + case DS_BUF_LOW: + return "LOW"; + case DS_BUF_FULL: + return "FULL"; + case DS_BUF_EMPTY: + return "EMPTY"; + default: + break; + } + + return "UNKNOWN"; +} + +int32_t qwSetTaskStatus(QW_FPARAMS_DEF, SQWTaskStatus *task, int8_t status) { + int32_t code = 0; + int8_t origStatus = 0; + bool ignore = false; + + while (true) { + origStatus = atomic_load_8(&task->status); + + QW_ERR_RET(qwDbgValidateStatus(QW_FPARAMS(), origStatus, status, &ignore)); + if (ignore) { + break; + } + + if (origStatus != atomic_val_compare_exchange_8(&task->status, origStatus, status)) { + continue; + } + + QW_TASK_DLOG("task status updated from %s to %s", jobTaskStatusStr(origStatus), jobTaskStatusStr(status)); + + break; + } + + return TSDB_CODE_SUCCESS; +} + +int32_t 
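qwSetTaskStatus above retries a compare-and-swap until the status either moves or the validator asks it to ignore the update. The same loop reduced to C11 atomics (TDengine uses its own atomic_* wrappers, so this is a sketch, not the original code):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool setStatusCas(_Atomic int8_t *status, int8_t newStatus,
                         bool (*validate)(int8_t from, int8_t to)) {
  for (;;) {
    int8_t cur = atomic_load(status);
    if (!validate(cur, newStatus)) return false;  /* invalid transition */
    if (cur == newStatus) return true;            /* ignorable no-op */
    /* Retry if another thread changed the status between load and CAS. */
    if (atomic_compare_exchange_weak(status, &cur, newStatus)) return true;
  }
}
```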
qwAddSchedulerImpl(SQWorker *mgmt, uint64_t sId, int32_t rwType) { + SQWSchStatus newSch = {0}; + newSch.tasksHash = + taosHashInit(mgmt->cfg.maxSchTaskNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + if (NULL == newSch.tasksHash) { + QW_SCH_ELOG("taosHashInit %d failed", mgmt->cfg.maxSchTaskNum); + QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + QW_LOCK(QW_WRITE, &mgmt->schLock); + int32_t code = taosHashPut(mgmt->schHash, &sId, sizeof(sId), &newSch, sizeof(newSch)); + if (0 != code) { + if (!HASH_NODE_EXIST(code)) { + QW_UNLOCK(QW_WRITE, &mgmt->schLock); + + QW_SCH_ELOG("taosHashPut new sch to scheduleHash failed, errno:%d", errno); + taosHashCleanup(newSch.tasksHash); + QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + taosHashCleanup(newSch.tasksHash); + } + QW_UNLOCK(QW_WRITE, &mgmt->schLock); + + return TSDB_CODE_SUCCESS; +} + +int32_t qwAcquireSchedulerImpl(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch, int32_t nOpt) { + while (true) { + QW_LOCK(rwType, &mgmt->schLock); + *sch = taosHashGet(mgmt->schHash, &sId, sizeof(sId)); + if (NULL == (*sch)) { + QW_UNLOCK(rwType, &mgmt->schLock); + + if (QW_NOT_EXIST_ADD == nOpt) { + QW_ERR_RET(qwAddSchedulerImpl(mgmt, sId, rwType)); + + nOpt = QW_NOT_EXIST_RET_ERR; + + continue; + } else if (QW_NOT_EXIST_RET_ERR == nOpt) { + QW_RET(TSDB_CODE_QRY_SCH_NOT_EXIST); + } else { + QW_SCH_ELOG("unknown notExistOpt:%d", nOpt); + QW_ERR_RET(TSDB_CODE_QRY_APP_ERROR); + } + } + + break; + } + + return TSDB_CODE_SUCCESS; +} + +int32_t qwAcquireAddScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch) { + return qwAcquireSchedulerImpl(mgmt, sId, rwType, sch, QW_NOT_EXIST_ADD); +} + +int32_t qwAcquireScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch) { + return qwAcquireSchedulerImpl(mgmt, sId, rwType, sch, QW_NOT_EXIST_RET_ERR); +} + +void qwReleaseScheduler(int32_t rwType, SQWorker *mgmt) { QW_UNLOCK(rwType, &mgmt->schLock); } + +int32_t qwAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch, SQWTaskStatus **task) { + char id[sizeof(qId) + sizeof(tId)] = {0}; + QW_SET_QTID(id, qId, tId); + + QW_LOCK(rwType, &sch->tasksLock); + *task = taosHashGet(sch->tasksHash, id, sizeof(id)); + if (NULL == (*task)) { + QW_UNLOCK(rwType, &sch->tasksLock); + QW_ERR_RET(TSDB_CODE_QRY_TASK_NOT_EXIST); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t qwAddTaskStatusImpl(QW_FPARAMS_DEF, SQWSchStatus *sch, int32_t rwType, int32_t status, SQWTaskStatus **task) { + int32_t code = 0; + + char id[sizeof(qId) + sizeof(tId)] = {0}; + QW_SET_QTID(id, qId, tId); + + SQWTaskStatus ntask = {0}; + ntask.status = status; + ntask.refId = rId; + + QW_LOCK(QW_WRITE, &sch->tasksLock); + code = taosHashPut(sch->tasksHash, id, sizeof(id), &ntask, sizeof(ntask)); + if (0 != code) { + QW_UNLOCK(QW_WRITE, &sch->tasksLock); + if (HASH_NODE_EXIST(code)) { + if (rwType && task) { + QW_RET(qwAcquireTaskStatus(QW_FPARAMS(), rwType, sch, task)); + } else { + QW_TASK_ELOG("task status already exist, newStatus:%s", jobTaskStatusStr(status)); + QW_ERR_RET(TSDB_CODE_QRY_TASK_ALREADY_EXIST); + } + } else { + QW_TASK_ELOG("taosHashPut to tasksHash failed, error:%x - %s", code, tstrerror(code)); + QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + } + QW_UNLOCK(QW_WRITE, &sch->tasksLock); + + QW_TASK_DLOG("task status added, newStatus:%s", jobTaskStatusStr(status)); + + if (rwType && task) { + QW_ERR_RET(qwAcquireTaskStatus(QW_FPARAMS(), rwType, sch, task)); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t 
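qwAcquireSchedulerImpl implements get-or-create under a read/write latch: look up under the lock, and on a miss release it, insert (tolerating a concurrent insert, which is why HASH_NODE_EXIST is not treated as an error), then retry the lookup exactly once. An illustrative reduction, with a fixed-size array standing in for taosHash and pthread locks standing in for QW_LOCK:

```c
#include <pthread.h>
#include <stdint.h>

#define MAX_SCH 16

typedef struct { uint64_t sId; int used; } SchSlot;

static SchSlot          gSch[MAX_SCH];
static pthread_rwlock_t gLock = PTHREAD_RWLOCK_INITIALIZER;

static SchSlot *lookup(uint64_t sId) {       /* caller holds gLock */
  for (int i = 0; i < MAX_SCH; ++i)
    if (gSch[i].used && gSch[i].sId == sId) return &gSch[i];
  return (SchSlot *)0;
}

static void insert(uint64_t sId) {           /* ok if it already exists */
  pthread_rwlock_wrlock(&gLock);
  if (lookup(sId) == (SchSlot *)0) {
    for (int i = 0; i < MAX_SCH; ++i)
      if (!gSch[i].used) { gSch[i].used = 1; gSch[i].sId = sId; break; }
  }
  pthread_rwlock_unlock(&gLock);
}

/* Returns with the read lock held on success, mirroring the
 * qwAcquireScheduler/qwReleaseScheduler pairing. */
static SchSlot *acquireAdd(uint64_t sId) {
  int added = 0;
  for (;;) {
    pthread_rwlock_rdlock(&gLock);
    SchSlot *s = lookup(sId);
    if (s) return s;                         /* caller must unlock later */
    pthread_rwlock_unlock(&gLock);
    if (added) return (SchSlot *)0;          /* inserted once, still gone */
    insert(sId);
    added = 1;                               /* retry the lookup once */
  }
}
```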
qwAddTaskStatus(QW_FPARAMS_DEF, int32_t status) { + SQWSchStatus *tsch = NULL; + int32_t code = 0; + QW_ERR_RET(qwAcquireAddScheduler(mgmt, sId, QW_READ, &tsch)); + + QW_ERR_JRET(qwAddTaskStatusImpl(QW_FPARAMS(), tsch, 0, status, NULL)); + +_return: + + qwReleaseScheduler(QW_READ, mgmt); + + QW_RET(code); +} + +int32_t qwAddAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch, int32_t status, + SQWTaskStatus **task) { + return qwAddTaskStatusImpl(QW_FPARAMS(), sch, rwType, status, task); +} + +void qwReleaseTaskStatus(int32_t rwType, SQWSchStatus *sch) { QW_UNLOCK(rwType, &sch->tasksLock); } + +int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { + char id[sizeof(qId) + sizeof(tId)] = {0}; + QW_SET_QTID(id, qId, tId); + + *ctx = taosHashAcquire(mgmt->ctxHash, id, sizeof(id)); + if (NULL == (*ctx)) { + QW_TASK_DLOG_E("task ctx not exist, may be dropped"); + QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { + char id[sizeof(qId) + sizeof(tId)] = {0}; + QW_SET_QTID(id, qId, tId); + + *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id)); + if (NULL == (*ctx)) { + QW_TASK_DLOG_E("task ctx not exist, may be dropped"); + QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t qwAddTaskCtxImpl(QW_FPARAMS_DEF, bool acquire, SQWTaskCtx **ctx) { + char id[sizeof(qId) + sizeof(tId)] = {0}; + QW_SET_QTID(id, qId, tId); + + SQWTaskCtx nctx = {0}; + + int32_t code = taosHashPut(mgmt->ctxHash, id, sizeof(id), &nctx, sizeof(SQWTaskCtx)); + if (0 != code) { + if (HASH_NODE_EXIST(code)) { + if (acquire && ctx) { + QW_RET(qwAcquireTaskCtx(QW_FPARAMS(), ctx)); + } else if (ctx) { + QW_RET(qwGetTaskCtx(QW_FPARAMS(), ctx)); + } else { + QW_TASK_ELOG_E("task ctx already exist"); + QW_ERR_RET(TSDB_CODE_QRY_TASK_ALREADY_EXIST); + } + } else { + QW_TASK_ELOG("taosHashPut to ctxHash failed, error:%x", code); + QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + } + + if (acquire && ctx) { + QW_RET(qwAcquireTaskCtx(QW_FPARAMS(), ctx)); + } else if (ctx) { + QW_RET(qwGetTaskCtx(QW_FPARAMS(), ctx)); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t qwAddTaskCtx(QW_FPARAMS_DEF) { QW_RET(qwAddTaskCtxImpl(QW_FPARAMS(), false, NULL)); } + +int32_t qwAddAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { return qwAddTaskCtxImpl(QW_FPARAMS(), true, ctx); } + +void qwReleaseTaskCtx(SQWorker *mgmt, void *ctx) { taosHashRelease(mgmt->ctxHash, ctx); } + +void qwFreeTaskHandle(QW_FPARAMS_DEF, qTaskInfo_t *taskHandle) { + // Note: free/kill may in RC + qTaskInfo_t otaskHandle = atomic_load_ptr(taskHandle); + if (otaskHandle && atomic_val_compare_exchange_ptr(taskHandle, otaskHandle, NULL)) { + qDestroyTask(otaskHandle); + } +} + +int32_t qwKillTaskHandle(QW_FPARAMS_DEF, SQWTaskCtx *ctx) { + int32_t code = 0; + // Note: free/kill may in RC + qTaskInfo_t taskHandle = atomic_load_ptr(&ctx->taskHandle); + if (taskHandle && atomic_val_compare_exchange_ptr(&ctx->taskHandle, taskHandle, NULL)) { + code = qAsyncKillTask(taskHandle); + atomic_store_ptr(&ctx->taskHandle, taskHandle); + } + + QW_RET(code); +} + +void qwFreeTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx) { + tmsgReleaseHandle(&ctx->ctrlConnInfo, TAOS_CONN_SERVER); + ctx->ctrlConnInfo.handle = NULL; + ctx->ctrlConnInfo.refId = -1; + + // NO need to release dataConnInfo + + qwFreeTaskHandle(QW_FPARAMS(), &ctx->taskHandle); + + if (ctx->sinkHandle) { + dsDestroyDataSinker(ctx->sinkHandle); + ctx->sinkHandle = NULL; + } + + if (ctx->plan) { + 
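qwFreeTaskHandle and qwKillTaskHandle may race on the same handle, as the "free/kill may in RC" comments note; whichever thread wins the compare-and-swap to NULL owns the handle, and only that thread acts on it. A minimal C11 sketch of the idiom (Task and destroyTask are placeholders):

```c
#include <stdatomic.h>
#include <stdio.h>

typedef struct Task { int id; } Task;

static void destroyTask(Task *t) { printf("destroyed task %d\n", t->id); }

static void freeHandleOnce(Task *_Atomic *slot) {
  Task *old = atomic_load(slot);
  /* Only the thread that swaps the non-NULL pointer out may destroy it;
   * the loser of the race sees NULL (or a stale pointer) and does nothing. */
  if (old && atomic_compare_exchange_strong(slot, &old, (Task *)0)) {
    destroyTask(old);
  }
}
```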
nodesDestroyNode(ctx->plan); + ctx->plan = NULL; + } +} + +int32_t qwDropTaskCtx(QW_FPARAMS_DEF) { + char id[sizeof(qId) + sizeof(tId)] = {0}; + QW_SET_QTID(id, qId, tId); + SQWTaskCtx octx; + + SQWTaskCtx *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id)); + if (NULL == ctx) { + QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); + } + + octx = *ctx; + + atomic_store_ptr(&ctx->taskHandle, NULL); + atomic_store_ptr(&ctx->sinkHandle, NULL); + atomic_store_ptr(&ctx->plan, NULL); + + QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_DROP); + + if (taosHashRemove(mgmt->ctxHash, id, sizeof(id))) { + QW_TASK_ELOG_E("taosHashRemove from ctx hash failed"); + QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); + } + + qwFreeTask(QW_FPARAMS(), &octx); + + QW_TASK_DLOG_E("task ctx dropped"); + + return TSDB_CODE_SUCCESS; +} + +int32_t qwDropTaskStatus(QW_FPARAMS_DEF) { + SQWSchStatus *sch = NULL; + SQWTaskStatus *task = NULL; + int32_t code = 0; + + char id[sizeof(qId) + sizeof(tId)] = {0}; + QW_SET_QTID(id, qId, tId); + + if (qwAcquireScheduler(mgmt, sId, QW_WRITE, &sch)) { + QW_TASK_WLOG_E("scheduler does not exist"); + return TSDB_CODE_SUCCESS; + } + + if (qwAcquireTaskStatus(QW_FPARAMS(), QW_WRITE, sch, &task)) { + qwReleaseScheduler(QW_WRITE, mgmt); + + QW_TASK_WLOG_E("task does not exist"); + return TSDB_CODE_SUCCESS; + } + + if (taosHashRemove(sch->tasksHash, id, sizeof(id))) { + QW_TASK_ELOG_E("taosHashRemove task from hash failed"); + QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); + } + + QW_TASK_DLOG_E("task status dropped"); + +_return: + + if (task) { + qwReleaseTaskStatus(QW_WRITE, sch); + } + qwReleaseScheduler(QW_WRITE, mgmt); + + QW_RET(code); +} + +int32_t qwUpdateTaskStatus(QW_FPARAMS_DEF, int8_t status) { + SQWSchStatus *sch = NULL; + SQWTaskStatus *task = NULL; + int32_t code = 0; + + QW_ERR_RET(qwAcquireScheduler(mgmt, sId, QW_READ, &sch)); + QW_ERR_JRET(qwAcquireTaskStatus(QW_FPARAMS(), QW_READ, sch, &task)); + + QW_ERR_JRET(qwSetTaskStatus(QW_FPARAMS(), task, status)); + +_return: + + if (task) { + qwReleaseTaskStatus(QW_READ, sch); + } + qwReleaseScheduler(QW_READ, mgmt); + + QW_RET(code); +} + +int32_t qwDropTask(QW_FPARAMS_DEF) { + QW_ERR_RET(qwDropTaskStatus(QW_FPARAMS())); + QW_ERR_RET(qwDropTaskCtx(QW_FPARAMS())); + + QW_TASK_DLOG_E("task is dropped"); + + return TSDB_CODE_SUCCESS; +} + + +void qwSetHbParam(int64_t refId, SQWHbParam **pParam) { + int32_t paramIdx = 0; + int32_t newParamIdx = 0; + + while (true) { + paramIdx = atomic_load_32(&gQwMgmt.paramIdx); + if (paramIdx == tListLen(gQwMgmt.param)) { + newParamIdx = 0; + } else { + newParamIdx = paramIdx + 1; + } + + if (paramIdx == atomic_val_compare_exchange_32(&gQwMgmt.paramIdx, paramIdx, newParamIdx)) { + break; + } + } + + gQwMgmt.param[paramIdx].qwrId = gQwMgmt.qwRef; + gQwMgmt.param[paramIdx].refId = refId; + + *pParam = &gQwMgmt.param[paramIdx]; +} + + +void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx) { + char dbFName[TSDB_DB_FNAME_LEN]; + char tbName[TSDB_TABLE_NAME_LEN]; + + qGetQueriedTableSchemaVersion(pTaskInfo, dbFName, tbName, &ctx->tbInfo.sversion, &ctx->tbInfo.tversion); + + if (dbFName[0] && tbName[0]) { + sprintf(ctx->tbInfo.tbFName, "%s.%s", dbFName, tbName); + } else { + ctx->tbInfo.tbFName[0] = 0; + } +} + + +void qwCloseRef(void) { + taosWLockLatch(&gQwMgmt.lock); + if (atomic_load_32(&gQwMgmt.qwNum) <= 0 && gQwMgmt.qwRef >= 0) { + taosCloseRef(gQwMgmt.qwRef); + gQwMgmt.qwRef = -1; + } + taosWUnLockLatch(&gQwMgmt.lock); +} + + +void qwDestroySchStatus(SQWSchStatus *pStatus) { taosHashCleanup(pStatus->tasksHash); } 
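qwSetHbParam hands out heartbeat-parameter slots from a fixed array by advancing a shared index with a compare-and-swap, so concurrent timer callbacks never receive the same slot until the index wraps. An illustrative C11 reduction; it wraps with a modulo so the claimed index always stays inside the array, which is slightly stricter than the original's wrap check:

```c
#include <stdatomic.h>
#include <stdint.h>

#define PARAM_NUM 1024  /* illustrative capacity */

typedef struct HbParam { int64_t refId; } HbParam;

static HbParam     gParam[PARAM_NUM];
static _Atomic int gParamIdx;

static HbParam *claimParamSlot(int64_t refId) {
  int idx, next;
  do {
    idx  = atomic_load(&gParamIdx);
    next = (idx + 1) % PARAM_NUM;          /* keep the index in range */
  } while (!atomic_compare_exchange_weak(&gParamIdx, &idx, next));

  gParam[idx].refId = refId;               /* this slot is now ours */
  return &gParam[idx];
}
```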
+ +void qwDestroyImpl(void *pMgmt) { + SQWorker *mgmt = (SQWorker *)pMgmt; + + taosTmrStopA(&mgmt->hbTimer); + taosTmrCleanUp(mgmt->timer); + + // TODO STOP ALL QUERY + + // TODO FREE ALL + + taosHashCleanup(mgmt->ctxHash); + + void *pIter = taosHashIterate(mgmt->schHash, NULL); + while (pIter) { + SQWSchStatus *sch = (SQWSchStatus *)pIter; + qwDestroySchStatus(sch); + pIter = taosHashIterate(mgmt->schHash, pIter); + } + taosHashCleanup(mgmt->schHash); + + taosMemoryFree(mgmt); + + atomic_sub_fetch_32(&gQwMgmt.qwNum, 1); + + qwCloseRef(); +} + +int32_t qwOpenRef(void) { + taosWLockLatch(&gQwMgmt.lock); + if (gQwMgmt.qwRef < 0) { + gQwMgmt.qwRef = taosOpenRef(100, qwDestroyImpl); + if (gQwMgmt.qwRef < 0) { + taosWUnLockLatch(&gQwMgmt.lock); + qError("init qworker ref failed"); + QW_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + } + taosWUnLockLatch(&gQwMgmt.lock); + + return TSDB_CODE_SUCCESS; +} + +int32_t qwUpdateWaitTimeInQueue(SQWorker *mgmt, int64_t ts, EQueueType type) { + if (ts <= 0) { + return TSDB_CODE_SUCCESS; + } + + int64_t duration = taosGetTimestampUs() - ts; + switch (type) { + case QUERY_QUEUE: + ++mgmt->stat.msgWait[0].num; + mgmt->stat.msgWait[0].total += duration; + break; + case FETCH_QUEUE: + ++mgmt->stat.msgWait[1].num; + mgmt->stat.msgWait[1].total += duration; + break; + default: + qError("unsupported queue type %d", type); + return TSDB_CODE_APP_ERROR; + } + + return TSDB_CODE_SUCCESS; +} + +int64_t qwGetWaitTimeInQueue(SQWorker *mgmt, EQueueType type) { + SQWWaitTimeStat *pStat = NULL; + switch (type) { + case QUERY_QUEUE: + pStat = &mgmt->stat.msgWait[0]; + return pStat->num ? (pStat->total/pStat->num) : 0; + case FETCH_QUEUE: + pStat = &mgmt->stat.msgWait[1]; + return pStat->num ? (pStat->total/pStat->num) : 0; + default: + qError("unsupported queue type %d", type); + return -1; + } +} + + + diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index 5b358fb0080bef9f9df4a34a26a4816662154f59..7201820854e6a87a1dffc12a47c37b8d6b692668 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -3,545 +3,58 @@ #include "executor.h" #include "planner.h" #include "query.h" -#include "qworkerInt.h" -#include "qworkerMsg.h" +#include "qwInt.h" +#include "qwMsg.h" #include "tcommon.h" #include "tmsg.h" #include "tname.h" -SQWDebug gQWDebug = {.statusEnable = true, .dumpEnable = true}; SQWorkerMgmt gQwMgmt = { .lock = 0, .qwRef = -1, .qwNum = 0, }; -int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, bool *ignore) { - if (!gQWDebug.statusEnable) { - return TSDB_CODE_SUCCESS; - } - - int32_t code = 0; - - if (oriStatus == newStatus) { - if (newStatus == JOB_TASK_STATUS_EXECUTING || newStatus == JOB_TASK_STATUS_FAILED) { - *ignore = true; - return TSDB_CODE_SUCCESS; - } - - QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - switch (oriStatus) { - case JOB_TASK_STATUS_NULL: - if (newStatus != JOB_TASK_STATUS_EXECUTING && newStatus != JOB_TASK_STATUS_FAILED && - newStatus != JOB_TASK_STATUS_NOT_START) { - QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - break; - case JOB_TASK_STATUS_NOT_START: - if (newStatus != JOB_TASK_STATUS_CANCELLED) { - QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - break; - case JOB_TASK_STATUS_EXECUTING: - if (newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED && newStatus != JOB_TASK_STATUS_SUCCEED && - newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_CANCELLING && - newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING) { - 
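qwOpenRef above lazily initializes the process-wide ref set under a write latch, re-checking inside the lock so concurrent workers open it at most once, and qwCloseRef tears it down when the last worker leaves. The same double-checked shape with pthreads (openRefOnce and the callback are illustrative):

```c
#include <pthread.h>

static pthread_mutex_t gInitLock = PTHREAD_MUTEX_INITIALIZER;
static int             gRef      = -1;

static int openRefOnce(int (*openRef)(void)) {
  pthread_mutex_lock(&gInitLock);
  if (gRef < 0) {              /* re-check while holding the lock */
    gRef = openRef();
  }
  pthread_mutex_unlock(&gInitLock);
  return gRef < 0 ? -1 : 0;
}
```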
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - break; - case JOB_TASK_STATUS_PARTIAL_SUCCEED: - if (newStatus != JOB_TASK_STATUS_EXECUTING && newStatus != JOB_TASK_STATUS_SUCCEED && - newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_FAILED && - newStatus != JOB_TASK_STATUS_DROPPING) { - QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - break; - case JOB_TASK_STATUS_SUCCEED: - if (newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING && - newStatus != JOB_TASK_STATUS_FAILED) { - QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - break; - case JOB_TASK_STATUS_FAILED: - if (newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING) { - QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - break; - - case JOB_TASK_STATUS_CANCELLING: - if (newStatus != JOB_TASK_STATUS_CANCELLED) { - QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - break; - case JOB_TASK_STATUS_CANCELLED: - case JOB_TASK_STATUS_DROPPING: - if (newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) { - QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - break; - - default: - QW_TASK_ELOG("invalid task origStatus:%s", jobTaskStatusStr(oriStatus)); - return TSDB_CODE_QRY_APP_ERROR; - } - - return TSDB_CODE_SUCCESS; - -_return: - - QW_TASK_ELOG("invalid task status update from %s to %s", jobTaskStatusStr(oriStatus), jobTaskStatusStr(newStatus)); - QW_RET(code); -} - -void qwDbgDumpSchInfo(SQWSchStatus *sch, int32_t i) {} - -void qwDbgDumpMgmtInfo(SQWorker *mgmt) { - if (!gQWDebug.dumpEnable) { - return; - } - - QW_LOCK(QW_READ, &mgmt->schLock); - - /*QW_DUMP("total remain schduler num:%d", taosHashGetSize(mgmt->schHash));*/ - - void *key = NULL; - size_t keyLen = 0; - int32_t i = 0; - SQWSchStatus *sch = NULL; - - void *pIter = taosHashIterate(mgmt->schHash, NULL); - while (pIter) { - sch = (SQWSchStatus *)pIter; - qwDbgDumpSchInfo(sch, i); - ++i; - pIter = taosHashIterate(mgmt->schHash, pIter); - } - - QW_UNLOCK(QW_READ, &mgmt->schLock); - - /*QW_DUMP("total remain ctx num:%d", taosHashGetSize(mgmt->ctxHash));*/ -} - -char *qwPhaseStr(int32_t phase) { - switch (phase) { - case QW_PHASE_PRE_QUERY: - return "PRE_QUERY"; - case QW_PHASE_POST_QUERY: - return "POST_QUERY"; - case QW_PHASE_PRE_FETCH: - return "PRE_FETCH"; - case QW_PHASE_POST_FETCH: - return "POST_FETCH"; - case QW_PHASE_PRE_CQUERY: - return "PRE_CQUERY"; - case QW_PHASE_POST_CQUERY: - return "POST_CQUERY"; - default: - break; - } - - return "UNKNOWN"; -} - -char *qwBufStatusStr(int32_t bufStatus) { - switch (bufStatus) { - case DS_BUF_LOW: - return "LOW"; - case DS_BUF_FULL: - return "FULL"; - case DS_BUF_EMPTY: - return "EMPTY"; - default: - break; - } - - return "UNKNOWN"; -} - -int32_t qwSetTaskStatus(QW_FPARAMS_DEF, SQWTaskStatus *task, int8_t status) { - int32_t code = 0; - int8_t origStatus = 0; - bool ignore = false; - - while (true) { - origStatus = atomic_load_8(&task->status); - - QW_ERR_RET(qwDbgValidateStatus(QW_FPARAMS(), origStatus, status, &ignore)); - if (ignore) { - break; - } - - if (origStatus != atomic_val_compare_exchange_8(&task->status, origStatus, status)) { - continue; - } - - QW_TASK_DLOG("task status updated from %s to %s", jobTaskStatusStr(origStatus), jobTaskStatusStr(status)); - - break; - } - - return TSDB_CODE_SUCCESS; -} - -int32_t qwAddSchedulerImpl(SQWorker *mgmt, uint64_t sId, int32_t rwType) { - SQWSchStatus newSch = {0}; - newSch.tasksHash = - taosHashInit(mgmt->cfg.maxSchTaskNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); - if (NULL 
== newSch.tasksHash) { - QW_SCH_ELOG("taosHashInit %d failed", mgmt->cfg.maxSchTaskNum); - QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - QW_LOCK(QW_WRITE, &mgmt->schLock); - int32_t code = taosHashPut(mgmt->schHash, &sId, sizeof(sId), &newSch, sizeof(newSch)); - if (0 != code) { - if (!HASH_NODE_EXIST(code)) { - QW_UNLOCK(QW_WRITE, &mgmt->schLock); - - QW_SCH_ELOG("taosHashPut new sch to scheduleHash failed, errno:%d", errno); - taosHashCleanup(newSch.tasksHash); - QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - taosHashCleanup(newSch.tasksHash); - } - QW_UNLOCK(QW_WRITE, &mgmt->schLock); - - return TSDB_CODE_SUCCESS; -} - -int32_t qwAcquireSchedulerImpl(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch, int32_t nOpt) { - while (true) { - QW_LOCK(rwType, &mgmt->schLock); - *sch = taosHashGet(mgmt->schHash, &sId, sizeof(sId)); - if (NULL == (*sch)) { - QW_UNLOCK(rwType, &mgmt->schLock); - - if (QW_NOT_EXIST_ADD == nOpt) { - QW_ERR_RET(qwAddSchedulerImpl(mgmt, sId, rwType)); - - nOpt = QW_NOT_EXIST_RET_ERR; - - continue; - } else if (QW_NOT_EXIST_RET_ERR == nOpt) { - QW_RET(TSDB_CODE_QRY_SCH_NOT_EXIST); - } else { - QW_SCH_ELOG("unknown notExistOpt:%d", nOpt); - QW_ERR_RET(TSDB_CODE_QRY_APP_ERROR); - } - } - break; - } - - return TSDB_CODE_SUCCESS; -} - -int32_t qwAcquireAddScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch) { - return qwAcquireSchedulerImpl(mgmt, sId, rwType, sch, QW_NOT_EXIST_ADD); -} - -int32_t qwAcquireScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch) { - return qwAcquireSchedulerImpl(mgmt, sId, rwType, sch, QW_NOT_EXIST_RET_ERR); -} - -void qwReleaseScheduler(int32_t rwType, SQWorker *mgmt) { QW_UNLOCK(rwType, &mgmt->schLock); } - -int32_t qwAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch, SQWTaskStatus **task) { - char id[sizeof(qId) + sizeof(tId)] = {0}; - QW_SET_QTID(id, qId, tId); - - QW_LOCK(rwType, &sch->tasksLock); - *task = taosHashGet(sch->tasksHash, id, sizeof(id)); - if (NULL == (*task)) { - QW_UNLOCK(rwType, &sch->tasksLock); - QW_ERR_RET(TSDB_CODE_QRY_TASK_NOT_EXIST); - } - - return TSDB_CODE_SUCCESS; -} - -int32_t qwAddTaskStatusImpl(QW_FPARAMS_DEF, SQWSchStatus *sch, int32_t rwType, int32_t status, SQWTaskStatus **task) { - int32_t code = 0; - - char id[sizeof(qId) + sizeof(tId)] = {0}; - QW_SET_QTID(id, qId, tId); - - SQWTaskStatus ntask = {0}; - ntask.status = status; - ntask.refId = rId; - - QW_LOCK(QW_WRITE, &sch->tasksLock); - code = taosHashPut(sch->tasksHash, id, sizeof(id), &ntask, sizeof(ntask)); - if (0 != code) { - QW_UNLOCK(QW_WRITE, &sch->tasksLock); - if (HASH_NODE_EXIST(code)) { - if (rwType && task) { - QW_RET(qwAcquireTaskStatus(QW_FPARAMS(), rwType, sch, task)); - } else { - QW_TASK_ELOG("task status already exist, newStatus:%s", jobTaskStatusStr(status)); - QW_ERR_RET(TSDB_CODE_QRY_TASK_ALREADY_EXIST); - } - } else { - QW_TASK_ELOG("taosHashPut to tasksHash failed, error:%x - %s", code, tstrerror(code)); - QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - } - QW_UNLOCK(QW_WRITE, &sch->tasksLock); - - QW_TASK_DLOG("task status added, newStatus:%s", jobTaskStatusStr(status)); - - if (rwType && task) { - QW_ERR_RET(qwAcquireTaskStatus(QW_FPARAMS(), rwType, sch, task)); - } - - return TSDB_CODE_SUCCESS; -} - -int32_t qwAddTaskStatus(QW_FPARAMS_DEF, int32_t status) { - SQWSchStatus *tsch = NULL; - int32_t code = 0; - QW_ERR_RET(qwAcquireAddScheduler(mgmt, sId, QW_READ, &tsch)); - - QW_ERR_JRET(qwAddTaskStatusImpl(QW_FPARAMS(), tsch, 0, status, NULL)); - 
-_return: - - qwReleaseScheduler(QW_READ, mgmt); - - QW_RET(code); -} - -int32_t qwAddAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch, int32_t status, - SQWTaskStatus **task) { - return qwAddTaskStatusImpl(QW_FPARAMS(), sch, rwType, status, task); -} - -void qwReleaseTaskStatus(int32_t rwType, SQWSchStatus *sch) { QW_UNLOCK(rwType, &sch->tasksLock); } - -int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { - char id[sizeof(qId) + sizeof(tId)] = {0}; - QW_SET_QTID(id, qId, tId); - - *ctx = taosHashAcquire(mgmt->ctxHash, id, sizeof(id)); - if (NULL == (*ctx)) { - QW_TASK_DLOG_E("task ctx not exist, may be dropped"); - QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); - } - - return TSDB_CODE_SUCCESS; -} - -int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { - char id[sizeof(qId) + sizeof(tId)] = {0}; - QW_SET_QTID(id, qId, tId); - - *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id)); - if (NULL == (*ctx)) { - QW_TASK_DLOG_E("task ctx not exist, may be dropped"); - QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); - } - - return TSDB_CODE_SUCCESS; -} - -int32_t qwAddTaskCtxImpl(QW_FPARAMS_DEF, bool acquire, SQWTaskCtx **ctx) { - char id[sizeof(qId) + sizeof(tId)] = {0}; - QW_SET_QTID(id, qId, tId); - - SQWTaskCtx nctx = {0}; - - int32_t code = taosHashPut(mgmt->ctxHash, id, sizeof(id), &nctx, sizeof(SQWTaskCtx)); - if (0 != code) { - if (HASH_NODE_EXIST(code)) { - if (acquire && ctx) { - QW_RET(qwAcquireTaskCtx(QW_FPARAMS(), ctx)); - } else if (ctx) { - QW_RET(qwGetTaskCtx(QW_FPARAMS(), ctx)); - } else { - QW_TASK_ELOG_E("task ctx already exist"); - QW_ERR_RET(TSDB_CODE_QRY_TASK_ALREADY_EXIST); - } - } else { - QW_TASK_ELOG("taosHashPut to ctxHash failed, error:%x", code); - QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - } - - if (acquire && ctx) { - QW_RET(qwAcquireTaskCtx(QW_FPARAMS(), ctx)); - } else if (ctx) { - QW_RET(qwGetTaskCtx(QW_FPARAMS(), ctx)); - } - - return TSDB_CODE_SUCCESS; -} - -int32_t qwAddTaskCtx(QW_FPARAMS_DEF) { QW_RET(qwAddTaskCtxImpl(QW_FPARAMS(), false, NULL)); } - -int32_t qwAddAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { return qwAddTaskCtxImpl(QW_FPARAMS(), true, ctx); } - -void qwReleaseTaskCtx(SQWorker *mgmt, void *ctx) { taosHashRelease(mgmt->ctxHash, ctx); } - -void qwFreeTaskHandle(QW_FPARAMS_DEF, qTaskInfo_t *taskHandle) { - // Note: free/kill may in RC - qTaskInfo_t otaskHandle = atomic_load_ptr(taskHandle); - if (otaskHandle && atomic_val_compare_exchange_ptr(taskHandle, otaskHandle, NULL)) { - qDestroyTask(otaskHandle); - } -} - -int32_t qwKillTaskHandle(QW_FPARAMS_DEF, SQWTaskCtx *ctx) { - int32_t code = 0; - // Note: free/kill may in RC - qTaskInfo_t taskHandle = atomic_load_ptr(&ctx->taskHandle); - if (taskHandle && atomic_val_compare_exchange_ptr(&ctx->taskHandle, taskHandle, NULL)) { - code = qAsyncKillTask(taskHandle); - atomic_store_ptr(&ctx->taskHandle, taskHandle); - } - - QW_RET(code); -} - -void qwFreeTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx) { - tmsgReleaseHandle(&ctx->ctrlConnInfo, TAOS_CONN_SERVER); - ctx->ctrlConnInfo.handle = NULL; - ctx->ctrlConnInfo.refId = -1; - - // NO need to release dataConnInfo - - qwFreeTaskHandle(QW_FPARAMS(), &ctx->taskHandle); - - if (ctx->sinkHandle) { - dsDestroyDataSinker(ctx->sinkHandle); - ctx->sinkHandle = NULL; - } - - if (ctx->plan) { - nodesDestroyNode(ctx->plan); - ctx->plan = NULL; - } -} - -int32_t qwDropTaskCtx(QW_FPARAMS_DEF) { - char id[sizeof(qId) + sizeof(tId)] = {0}; - QW_SET_QTID(id, qId, tId); - SQWTaskCtx octx; - - SQWTaskCtx *ctx = taosHashGet(mgmt->ctxHash, id, 
sizeof(id)); - if (NULL == ctx) { - QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); - } - - octx = *ctx; - - atomic_store_ptr(&ctx->taskHandle, NULL); - atomic_store_ptr(&ctx->sinkHandle, NULL); - atomic_store_ptr(&ctx->plan, NULL); - - QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_DROP); - - if (taosHashRemove(mgmt->ctxHash, id, sizeof(id))) { - QW_TASK_ELOG_E("taosHashRemove from ctx hash failed"); - QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); - } - - qwFreeTask(QW_FPARAMS(), &octx); - - QW_TASK_DLOG_E("task ctx dropped"); - - return TSDB_CODE_SUCCESS; -} - -int32_t qwDropTaskStatus(QW_FPARAMS_DEF) { - SQWSchStatus *sch = NULL; - SQWTaskStatus *task = NULL; - int32_t code = 0; - - char id[sizeof(qId) + sizeof(tId)] = {0}; - QW_SET_QTID(id, qId, tId); - - if (qwAcquireScheduler(mgmt, sId, QW_WRITE, &sch)) { - QW_TASK_WLOG_E("scheduler does not exist"); - return TSDB_CODE_SUCCESS; - } - - if (qwAcquireTaskStatus(QW_FPARAMS(), QW_WRITE, sch, &task)) { - qwReleaseScheduler(QW_WRITE, mgmt); - - QW_TASK_WLOG_E("task does not exist"); - return TSDB_CODE_SUCCESS; - } +int32_t qwProcessHbLinkBroken(SQWorker *mgmt, SQWMsg *qwMsg, SSchedulerHbReq *req) { + int32_t code = 0; + SSchedulerHbRsp rsp = {0}; + SQWSchStatus *sch = NULL; - if (taosHashRemove(sch->tasksHash, id, sizeof(id))) { - QW_TASK_ELOG_E("taosHashRemove task from hash failed"); - QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } + QW_ERR_RET(qwAcquireAddScheduler(mgmt, req->sId, QW_READ, &sch)); - QW_TASK_DLOG_E("task status dropped"); + QW_LOCK(QW_WRITE, &sch->hbConnLock); -_return: + if (qwMsg->connInfo.handle == sch->hbConnInfo.handle) { + tmsgReleaseHandle(&sch->hbConnInfo, TAOS_CONN_SERVER); + sch->hbConnInfo.handle = NULL; + sch->hbConnInfo.ahandle = NULL; - if (task) { - qwReleaseTaskStatus(QW_WRITE, sch); + QW_DLOG("release hb handle due to connection broken, handle:%p", qwMsg->connInfo.handle); + } else { + QW_DLOG("ignore hb connection broken, handle:%p, currentHandle:%p", qwMsg->connInfo.handle, sch->hbConnInfo.handle); } - qwReleaseScheduler(QW_WRITE, mgmt); - - QW_RET(code); -} - -int32_t qwUpdateTaskStatus(QW_FPARAMS_DEF, int8_t status) { - SQWSchStatus *sch = NULL; - SQWTaskStatus *task = NULL; - int32_t code = 0; - QW_ERR_RET(qwAcquireScheduler(mgmt, sId, QW_READ, &sch)); - QW_ERR_JRET(qwAcquireTaskStatus(QW_FPARAMS(), QW_READ, sch, &task)); - - QW_ERR_JRET(qwSetTaskStatus(QW_FPARAMS(), task, status)); - -_return: + QW_UNLOCK(QW_WRITE, &sch->hbConnLock); - if (task) { - qwReleaseTaskStatus(QW_READ, sch); - } qwReleaseScheduler(QW_READ, mgmt); - QW_RET(code); -} - -int32_t qwDropTask(QW_FPARAMS_DEF) { - QW_ERR_RET(qwDropTaskStatus(QW_FPARAMS())); - QW_ERR_RET(qwDropTaskCtx(QW_FPARAMS())); - - QW_TASK_DLOG_E("task is dropped"); - - return TSDB_CODE_SUCCESS; + QW_RET(TSDB_CODE_SUCCESS); } int32_t qwHandleTaskComplete(QW_FPARAMS_DEF, SQWTaskCtx *ctx) { - qTaskInfo_t *taskHandle = &ctx->taskHandle; + qTaskInfo_t taskHandle = ctx->taskHandle; - if (TASK_TYPE_TEMP == ctx->taskType) { + if (TASK_TYPE_TEMP == ctx->taskType && taskHandle) { if (ctx->explain) { SExplainExecInfo *execInfo = NULL; int32_t resNum = 0; - QW_ERR_RET(qGetExplainExecInfo(ctx->taskHandle, &resNum, &execInfo)); + QW_ERR_RET(qGetExplainExecInfo(taskHandle, &resNum, &execInfo)); SRpcHandleInfo connInfo = ctx->ctrlConnInfo; connInfo.ahandle = NULL; QW_ERR_RET(qwBuildAndSendExplainRsp(&connInfo, execInfo, resNum)); } - - qwFreeTaskHandle(QW_FPARAMS(), taskHandle); } return TSDB_CODE_SUCCESS; @@ -554,16 +67,21 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool 
*queryEnd) { uint64_t useconds = 0; int32_t i = 0; int32_t execNum = 0; - qTaskInfo_t *taskHandle = &ctx->taskHandle; + qTaskInfo_t taskHandle = ctx->taskHandle; DataSinkHandle sinkHandle = ctx->sinkHandle; while (true) { QW_TASK_DLOG("start to execTask, loopIdx:%d", i++); - code = qExecTask(*taskHandle, &pRes, &useconds); - if (code) { - QW_TASK_ELOG("qExecTask failed, code:%x - %s", code, tstrerror(code)); - QW_ERR_RET(code); + pRes = NULL; + + // if *taskHandle is NULL, it's killed right now + if (taskHandle) { + code = qExecTask(taskHandle, &pRes, &useconds); + if (code) { + QW_TASK_ELOG("qExecTask failed, code:%x - %s", code, tstrerror(code)); + QW_ERR_RET(code); + } } ++execNum; @@ -719,23 +237,9 @@ int32_t qwGetResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, void } -void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx) { - char dbFName[TSDB_DB_FNAME_LEN]; - char tbName[TSDB_TABLE_NAME_LEN]; - - qGetQueriedTableSchemaVersion(pTaskInfo, dbFName, tbName, &ctx->tbInfo.sversion, &ctx->tbInfo.tversion); - - if (dbFName[0] && tbName[0]) { - sprintf(ctx->tbInfo.tbFName, "%s.%s", dbFName, tbName); - } else { - ctx->tbInfo.tbFName[0] = 0; - } -} - int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *input, SQWPhaseOutput *output) { int32_t code = 0; SQWTaskCtx *ctx = NULL; - SRpcHandleInfo *dropConnection = NULL; SRpcHandleInfo *cancelConnection = NULL; QW_TASK_DLOG("start to handle event at phase %s", qwPhaseStr(phase)); @@ -768,12 +272,10 @@ int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inpu } if (QW_IS_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) { - dropConnection = &ctx->ctrlConnInfo; QW_ERR_JRET(qwDropTask(QW_FPARAMS())); - dropConnection = NULL; - qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code); - QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code)); + //qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code); + //QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code)); QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED); break; @@ -806,12 +308,10 @@ int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inpu } if (QW_IS_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) { - dropConnection = &ctx->ctrlConnInfo; QW_ERR_JRET(qwDropTask(QW_FPARAMS())); - dropConnection = NULL; - qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code); - QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code)); + //qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code); + //QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code)); QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED); } @@ -836,11 +336,6 @@ _return: qwReleaseTaskCtx(mgmt, ctx); } - if (dropConnection) { - qwBuildAndSendDropRsp(dropConnection, code); - QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", dropConnection->handle, code, tstrerror(code)); - } - if (cancelConnection) { qwBuildAndSendCancelRsp(cancelConnection, code); QW_TASK_DLOG("cancel rsp send, handle:%p, code:%x - %s", cancelConnection->handle, code, tstrerror(code)); @@ -859,7 +354,7 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp int32_t code = 0; SQWTaskCtx *ctx = NULL; SRpcHandleInfo connInfo = {0}; - SRpcHandleInfo *readyConnection = NULL; + SRpcHandleInfo *rspConnection = NULL; QW_TASK_DLOG("start to handle event at phase %s", qwPhaseStr(phase)); @@ -880,7 +375,7 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, 
int8_t phase, SQWPhaseInput *inp } #else connInfo = ctx->ctrlConnInfo; - readyConnection = &connInfo; + rspConnection = &connInfo; QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_READY); #endif @@ -892,8 +387,8 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); } - qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code); - QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code)); + //qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code); + //QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code)); QW_ERR_JRET(qwDropTask(QW_FPARAMS())); QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED); @@ -913,9 +408,9 @@ _return: qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_PARTIAL_SUCCEED); } - if (readyConnection) { - qwBuildAndSendReadyRsp(readyConnection, code, ctx ? &ctx->tbInfo : NULL); - QW_TASK_DLOG("ready msg rsped, handle:%p, code:%x - %s", readyConnection->handle, code, tstrerror(code)); + if (rspConnection) { + qwBuildAndSendQueryRsp(rspConnection, code, ctx ? &ctx->tbInfo : NULL); + QW_TASK_DLOG("ready msg rsped, handle:%p, code:%x - %s", rspConnection->handle, code, tstrerror(code)); } if (ctx) { @@ -1006,69 +501,6 @@ _return: QW_RET(TSDB_CODE_SUCCESS); } -int32_t qwProcessReady(QW_FPARAMS_DEF, SQWMsg *qwMsg) { - int32_t code = 0; - SQWTaskCtx *ctx = NULL; - int8_t phase = 0; - bool needRsp = true; - - QW_ERR_JRET(qwAcquireTaskCtx(QW_FPARAMS(), &ctx)); - - QW_LOCK(QW_WRITE, &ctx->lock); - - if (QW_IS_EVENT_PROCESSED(ctx, QW_EVENT_DROP) || QW_IS_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) { - QW_TASK_WLOG_E("task is dropping or already dropped"); - QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED); - } - - if (ctx->phase == QW_PHASE_PRE_QUERY) { - ctx->ctrlConnInfo = qwMsg->connInfo; - QW_SET_EVENT_RECEIVED(ctx, QW_EVENT_READY); - needRsp = false; - QW_TASK_DLOG_E("ready msg will not rsp now"); - goto _return; - } - - QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_READY); - - if (atomic_load_8((int8_t *)&ctx->queryEnd) || atomic_load_8((int8_t *)&ctx->queryFetched)) { - QW_TASK_ELOG("got ready msg at wrong status, queryEnd:%d, queryFetched:%d", atomic_load_8((int8_t *)&ctx->queryEnd), - atomic_load_8((int8_t *)&ctx->queryFetched)); - QW_ERR_JRET(TSDB_CODE_QW_MSG_ERROR); - } - - if (ctx->phase == QW_PHASE_POST_QUERY) { - code = ctx->rspCode; - goto _return; - } - - QW_TASK_ELOG("invalid phase when got ready msg, phase:%s", qwPhaseStr(ctx->phase)); - - QW_ERR_JRET(TSDB_CODE_QRY_TASK_STATUS_ERROR); - -_return: - - if (code && ctx) { - QW_UPDATE_RSP_CODE(ctx, code); - } - - if (code) { - qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_FAILED); - } - - if (ctx) { - QW_UNLOCK(QW_WRITE, &ctx->lock); - qwReleaseTaskCtx(mgmt, ctx); - } - - if (needRsp) { - qwBuildAndSendReadyRsp(&qwMsg->connInfo, code, NULL); - QW_TASK_DLOG("ready msg rsped, handle:%p, code:%x - %s", qwMsg->connInfo.handle, code, tstrerror(code)); - } - - QW_RET(TSDB_CODE_SUCCESS); -} - int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) { SQWTaskCtx *ctx = NULL; int32_t code = 0; @@ -1242,11 +674,6 @@ int32_t qwProcessDrop(QW_FPARAMS_DEF, SQWMsg *qwMsg) { QW_ERR_JRET(qwKillTaskHandle(QW_FPARAMS(), ctx)); qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_DROPPING); } else if (ctx->phase > 0) { - if (0 == qwMsg->code) { - qwBuildAndSendDropRsp(&qwMsg->connInfo, code); - QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", qwMsg->connInfo.handle, code, tstrerror(code)); - } - QW_ERR_JRET(qwDropTask(QW_FPARAMS())); rsped = 
true; } else { @@ -1277,37 +704,6 @@ _return: qwReleaseTaskCtx(mgmt, ctx); } - if ((TSDB_CODE_SUCCESS != code) && (0 == qwMsg->code)) { - qwBuildAndSendDropRsp(&qwMsg->connInfo, code); - QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", qwMsg->connInfo.handle, code, tstrerror(code)); - } - - QW_RET(TSDB_CODE_SUCCESS); -} - -int32_t qwProcessHbLinkBroken(SQWorker *mgmt, SQWMsg *qwMsg, SSchedulerHbReq *req) { - int32_t code = 0; - SSchedulerHbRsp rsp = {0}; - SQWSchStatus *sch = NULL; - - QW_ERR_RET(qwAcquireAddScheduler(mgmt, req->sId, QW_READ, &sch)); - - QW_LOCK(QW_WRITE, &sch->hbConnLock); - - if (qwMsg->connInfo.handle == sch->hbConnInfo.handle) { - tmsgReleaseHandle(&sch->hbConnInfo, TAOS_CONN_SERVER); - sch->hbConnInfo.handle = NULL; - sch->hbConnInfo.ahandle = NULL; - - QW_DLOG("release hb handle due to connection broken, handle:%p", qwMsg->connInfo.handle); - } else { - QW_DLOG("ignore hb connection broken, handle:%p, currentHandle:%p", qwMsg->connInfo.handle, sch->hbConnInfo.handle); - } - - QW_UNLOCK(QW_WRITE, &sch->hbConnLock); - - qwReleaseScheduler(QW_READ, mgmt); - QW_RET(TSDB_CODE_SUCCESS); } @@ -1406,7 +802,7 @@ void qwProcessHbTimerEvent(void *param, void *tmrId) { SQWSchStatus *sch = (SQWSchStatus *)pIter; if (NULL == sch->hbConnInfo.handle) { uint64_t *sId = taosHashGetKey(pIter, NULL); - QW_DLOG("cancel send hb to sch %" PRIx64 " cause of no connection handle", *sId); + QW_TLOG("cancel send hb to sch %" PRIx64 " cause of no connection handle", *sId); pIter = taosHashIterate(mgmt->schHash, pIter); continue; } @@ -1438,81 +834,6 @@ _return: qwRelease(refId); } -void qwCloseRef(void) { - taosWLockLatch(&gQwMgmt.lock); - if (atomic_load_32(&gQwMgmt.qwNum) <= 0 && gQwMgmt.qwRef >= 0) { - taosCloseRef(gQwMgmt.qwRef); - gQwMgmt.qwRef = -1; - } - taosWUnLockLatch(&gQwMgmt.lock); -} - -void qwDestroySchStatus(SQWSchStatus *pStatus) { taosHashCleanup(pStatus->tasksHash); } - -void qwDestroyImpl(void *pMgmt) { - SQWorker *mgmt = (SQWorker *)pMgmt; - - taosTmrStopA(&mgmt->hbTimer); - taosTmrCleanUp(mgmt->timer); - - // TODO STOP ALL QUERY - - // TODO FREE ALL - - taosHashCleanup(mgmt->ctxHash); - - void *pIter = taosHashIterate(mgmt->schHash, NULL); - while (pIter) { - SQWSchStatus *sch = (SQWSchStatus *)pIter; - qwDestroySchStatus(sch); - pIter = taosHashIterate(mgmt->schHash, pIter); - } - taosHashCleanup(mgmt->schHash); - - taosMemoryFree(mgmt); - - atomic_sub_fetch_32(&gQwMgmt.qwNum, 1); - - qwCloseRef(); -} - -int32_t qwOpenRef(void) { - taosWLockLatch(&gQwMgmt.lock); - if (gQwMgmt.qwRef < 0) { - gQwMgmt.qwRef = taosOpenRef(100, qwDestroyImpl); - if (gQwMgmt.qwRef < 0) { - taosWUnLockLatch(&gQwMgmt.lock); - qError("init qworker ref failed"); - QW_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - } - taosWUnLockLatch(&gQwMgmt.lock); - - return TSDB_CODE_SUCCESS; -} - -void qwSetHbParam(int64_t refId, SQWHbParam **pParam) { - int32_t paramIdx = 0; - int32_t newParamIdx = 0; - - while (true) { - paramIdx = atomic_load_32(&gQwMgmt.paramIdx); - if (paramIdx == tListLen(gQwMgmt.param)) { - newParamIdx = 0; - } else { - newParamIdx = paramIdx + 1; - } - - if (paramIdx == atomic_val_compare_exchange_32(&gQwMgmt.paramIdx, paramIdx, newParamIdx)) { - break; - } - } - - gQwMgmt.param[paramIdx].qwrId = gQwMgmt.qwRef; - gQwMgmt.param[paramIdx].refId = refId; - - *pParam = &gQwMgmt.param[paramIdx]; -} int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qWorkerMgmt, const SMsgCb *pMsgCb) { if (NULL == qWorkerMgmt || pMsgCb->mgmt == NULL) { @@ -1629,146 +950,9 @@ void 
qWorkerDestroy(void **qWorkerMgmt) { } } -int32_t qwGetSchTasksStatus(SQWorker *mgmt, uint64_t sId, SSchedulerStatusRsp **rsp) { - /* - SQWSchStatus *sch = NULL; - int32_t taskNum = 0; - - QW_ERR_RET(qwAcquireScheduler(mgmt, sId, QW_READ, &sch)); - - sch->lastAccessTs = taosGetTimestampSec(); - - QW_LOCK(QW_READ, &sch->tasksLock); - - taskNum = taosHashGetSize(sch->tasksHash); - - int32_t size = sizeof(SSchedulerStatusRsp) + sizeof((*rsp)->status[0]) * taskNum; - *rsp = taosMemoryCalloc(1, size); - if (NULL == *rsp) { - QW_SCH_ELOG("calloc %d failed", size); - QW_UNLOCK(QW_READ, &sch->tasksLock); - qwReleaseScheduler(QW_READ, mgmt); - - return TSDB_CODE_QRY_OUT_OF_MEMORY; - } - - void *key = NULL; - size_t keyLen = 0; - int32_t i = 0; - - void *pIter = taosHashIterate(sch->tasksHash, NULL); - while (pIter) { - SQWTaskStatus *taskStatus = (SQWTaskStatus *)pIter; - taosHashGetKey(pIter, &key, &keyLen); - - QW_GET_QTID(key, (*rsp)->status[i].queryId, (*rsp)->status[i].taskId); - (*rsp)->status[i].status = taskStatus->status; - - ++i; - pIter = taosHashIterate(sch->tasksHash, pIter); - } - - QW_UNLOCK(QW_READ, &sch->tasksLock); - qwReleaseScheduler(QW_READ, mgmt); - - (*rsp)->num = taskNum; - */ - return TSDB_CODE_SUCCESS; -} - -int32_t qwUpdateSchLastAccess(SQWorker *mgmt, uint64_t sId, uint64_t qId, uint64_t tId) { - SQWSchStatus *sch = NULL; - - /* - QW_ERR_RET(qwAcquireScheduler(QW_READ, mgmt, sId, &sch)); - - sch->lastAccessTs = taosGetTimestampSec(); - - qwReleaseScheduler(QW_READ, mgmt); - */ - return TSDB_CODE_SUCCESS; +int64_t qWorkerGetWaitTimeInQueue(void *qWorkerMgmt, EQueueType type) { + return qwGetWaitTimeInQueue((SQWorker *)qWorkerMgmt, type); } -int32_t qwGetTaskStatus(SQWorker *mgmt, uint64_t sId, uint64_t qId, uint64_t tId, int8_t *taskStatus) { - SQWSchStatus *sch = NULL; - SQWTaskStatus *task = NULL; - int32_t code = 0; - /* - if (qwAcquireScheduler(QW_READ, mgmt, sId, &sch)) { - *taskStatus = JOB_TASK_STATUS_NULL; - return TSDB_CODE_SUCCESS; - } - - if (qwAcquireTask(mgmt, QW_READ, sch, queryId, taskId, &task)) { - qwReleaseScheduler(QW_READ, mgmt); - - *taskStatus = JOB_TASK_STATUS_NULL; - return TSDB_CODE_SUCCESS; - } - - *taskStatus = task->status; - - qwReleaseTask(QW_READ, sch); - qwReleaseScheduler(QW_READ, mgmt); - */ - - QW_RET(code); -} - -int32_t qwCancelTask(SQWorker *mgmt, uint64_t sId, uint64_t qId, uint64_t tId) { - SQWSchStatus *sch = NULL; - SQWTaskStatus *task = NULL; - int32_t code = 0; - /* - QW_ERR_RET(qwAcquireAddScheduler(QW_READ, mgmt, sId, &sch)); - - QW_ERR_JRET(qwAcquireAddTask(mgmt, QW_READ, sch, qId, tId, JOB_TASK_STATUS_NOT_START, &task)); - - - QW_LOCK(QW_WRITE, &task->lock); - - task->cancel = true; - - int8_t oriStatus = task->status; - int8_t newStatus = 0; - - if (task->status == JOB_TASK_STATUS_CANCELLED || task->status == JOB_TASK_STATUS_NOT_START || task->status == - JOB_TASK_STATUS_CANCELLING || task->status == JOB_TASK_STATUS_DROPPING) { QW_UNLOCK(QW_WRITE, &task->lock); - - qwReleaseTask(QW_READ, sch); - qwReleaseScheduler(QW_READ, mgmt); - - return TSDB_CODE_SUCCESS; - } else if (task->status == JOB_TASK_STATUS_FAILED || task->status == JOB_TASK_STATUS_SUCCEED || task->status == - JOB_TASK_STATUS_PARTIAL_SUCCEED) { QW_ERR_JRET(qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_CANCELLED)); } else { - QW_ERR_JRET(qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_CANCELLING)); - } - - QW_UNLOCK(QW_WRITE, &task->lock); - - qwReleaseTask(QW_READ, sch); - qwReleaseScheduler(QW_READ, mgmt); - - if (oriStatus == JOB_TASK_STATUS_EXECUTING) { 
- //TODO call executer to cancel subquery async - } - - return TSDB_CODE_SUCCESS; - - _return: - - if (task) { - QW_UNLOCK(QW_WRITE, &task->lock); - - qwReleaseTask(QW_READ, sch); - } - - if (sch) { - qwReleaseScheduler(QW_READ, mgmt); - } - */ - - QW_RET(code); -} diff --git a/source/libs/qworker/test/qworkerTests.cpp b/source/libs/qworker/test/qworkerTests.cpp index b573828e7694cc2f19ddd2e31fa9b34b590fc6ed..1b959fbe633e0c50ddc7b80af321ee0420a9616d 100644 --- a/source/libs/qworker/test/qworkerTests.cpp +++ b/source/libs/qworker/test/qworkerTests.cpp @@ -127,15 +127,6 @@ void qwtBuildQueryReqMsg(SRpcMsg *queryRpc) { queryRpc->contLen = sizeof(SSubQueryMsg) + 100; } -void qwtBuildReadyReqMsg(SResReadyReq *readyMsg, SRpcMsg *readyRpc) { - readyMsg->sId = htobe64(1); - readyMsg->queryId = htobe64(atomic_load_64(&qwtTestQueryId)); - readyMsg->taskId = htobe64(1); - readyRpc->msgType = TDMT_VND_RES_READY; - readyRpc->pCont = readyMsg; - readyRpc->contLen = sizeof(SResReadyReq); -} - void qwtBuildFetchReqMsg(SResFetchReq *fetchMsg, SRpcMsg *fetchRpc) { fetchMsg->sId = htobe64(1); fetchMsg->queryId = htobe64(atomic_load_64(&qwtTestQueryId)); @@ -154,13 +145,6 @@ void qwtBuildDropReqMsg(STaskDropReq *dropMsg, SRpcMsg *dropRpc) { dropRpc->contLen = sizeof(STaskDropReq); } -void qwtBuildStatusReqMsg(SSchTasksStatusReq *statusMsg, SRpcMsg *statusRpc) { - statusMsg->sId = htobe64(1); - statusRpc->pCont = statusMsg; - statusRpc->contLen = sizeof(SSchTasksStatusReq); - statusRpc->msgType = TDMT_VND_TASKS_STATUS; -} - int32_t qwtStringToPlan(const char* str, SSubplan** subplan) { *subplan = (SSubplan *)0x1; return 0; @@ -222,10 +206,7 @@ void qwtRpcSendResponse(const SRpcMsg *pRsp) { case TDMT_VND_QUERY_RSP: { SQueryTableRsp *rsp = (SQueryTableRsp *)pRsp->pCont; - if (0 == pRsp->code) { - qwtBuildReadyReqMsg(&qwtreadyMsg, &qwtreadyRpc); - qwtPutReqToFetchQueue((void *)0x1, &qwtreadyRpc); - } else { + if (pRsp->code) { qwtBuildDropReqMsg(&qwtdropMsg, &qwtdropRpc); qwtPutReqToFetchQueue((void *)0x1, &qwtdropRpc); } @@ -233,19 +214,6 @@ void qwtRpcSendResponse(const SRpcMsg *pRsp) { rpcFreeCont(rsp); break; } - case TDMT_VND_RES_READY_RSP: { - SResReadyRsp *rsp = (SResReadyRsp *)pRsp->pCont; - - if (0 == pRsp->code) { - qwtBuildFetchReqMsg(&qwtfetchMsg, &qwtfetchRpc); - qwtPutReqToFetchQueue((void *)0x1, &qwtfetchRpc); - } else { - qwtBuildDropReqMsg(&qwtdropMsg, &qwtdropRpc); - qwtPutReqToFetchQueue((void *)0x1, &qwtdropRpc); - } - rpcFreeCont(rsp); - break; - } case TDMT_VND_FETCH_RSP: { SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)pRsp->pCont; @@ -667,7 +635,7 @@ void *queryThread(void *param) { while (!qwtTestStop) { qwtBuildQueryReqMsg(&queryRpc); - qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc); + qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0); if (qwtTestEnableSleep) { taosUsleep(taosRand()%5); } @@ -679,28 +647,6 @@ void *queryThread(void *param) { return NULL; } -void *readyThread(void *param) { - SRpcMsg readyRpc = {0}; - int32_t code = 0; - uint32_t n = 0; - void *mockPointer = (void *)0x1; - void *mgmt = param; - SResReadyReq readyMsg = {0}; - - while (!qwtTestStop) { - qwtBuildReadyReqMsg(&readyMsg, &readyRpc); - code = qWorkerProcessReadyMsg(mockPointer, mgmt, &readyRpc); - if (qwtTestEnableSleep) { - taosUsleep(taosRand()%5); - } - if (++n % qwtTestPrintNum == 0) { - printf("ready:%d\n", n); - } - } - - return NULL; -} - void *fetchThread(void *param) { SRpcMsg fetchRpc = {0}; int32_t code = 0; @@ -711,7 +657,7 @@ void *fetchThread(void *param) { while (!qwtTestStop) { 
qwtBuildFetchReqMsg(&fetchMsg, &fetchRpc); - code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc); + code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc, 0); if (qwtTestEnableSleep) { taosUsleep(taosRand()%5); } @@ -733,7 +679,7 @@ void *dropThread(void *param) { while (!qwtTestStop) { qwtBuildDropReqMsg(&dropMsg, &dropRpc); - code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc); + code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0); if (qwtTestEnableSleep) { taosUsleep(taosRand()%5); } @@ -745,29 +691,6 @@ void *dropThread(void *param) { return NULL; } -void *statusThread(void *param) { - SRpcMsg statusRpc = {0}; - int32_t code = 0; - uint32_t n = 0; - void *mockPointer = (void *)0x1; - void *mgmt = param; - SSchTasksStatusReq statusMsg = {0}; - - while (!qwtTestStop) { - qwtBuildStatusReqMsg(&statusMsg, &statusRpc); - code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc); - if (qwtTestEnableSleep) { - taosUsleep(taosRand()%5); - } - if (++n % qwtTestPrintNum == 0) { - printf("status:%d\n", n); - } - } - - return NULL; -} - - void *qwtclientThread(void *param) { int32_t code = 0; uint32_t n = 0; @@ -835,9 +758,9 @@ void *queryQueueThread(void *param) { } if (TDMT_VND_QUERY == queryRpc->msgType) { - qWorkerProcessQueryMsg(mockPointer, mgmt, queryRpc); + qWorkerProcessQueryMsg(mockPointer, mgmt, queryRpc, 0); } else if (TDMT_VND_QUERY_CONTINUE == queryRpc->msgType) { - qWorkerProcessCQueryMsg(mockPointer, mgmt, queryRpc); + qWorkerProcessCQueryMsg(mockPointer, mgmt, queryRpc, 0); } else { printf("unknown msg in query queue, type:%d\n", queryRpc->msgType); assert(0); @@ -892,19 +815,13 @@ void *fetchQueueThread(void *param) { switch (fetchRpc->msgType) { case TDMT_VND_FETCH: - qWorkerProcessFetchMsg(mockPointer, mgmt, fetchRpc); - break; - case TDMT_VND_RES_READY: - qWorkerProcessReadyMsg(mockPointer, mgmt, fetchRpc); - break; - case TDMT_VND_TASKS_STATUS: - qWorkerProcessStatusMsg(mockPointer, mgmt, fetchRpc); + qWorkerProcessFetchMsg(mockPointer, mgmt, fetchRpc, 0); break; case TDMT_VND_CANCEL_TASK: - qWorkerProcessCancelMsg(mockPointer, mgmt, fetchRpc); + qWorkerProcessCancelMsg(mockPointer, mgmt, fetchRpc, 0); break; case TDMT_VND_DROP_TASK: - qWorkerProcessDropMsg(mockPointer, mgmt, fetchRpc); + qWorkerProcessDropMsg(mockPointer, mgmt, fetchRpc, 0); break; default: printf("unknown msg type:%d in fetch queue", fetchRpc->msgType); @@ -934,15 +851,12 @@ TEST(seqTest, normalCase) { int32_t code = 0; void *mockPointer = (void *)0x1; SRpcMsg queryRpc = {0}; - SRpcMsg readyRpc = {0}; SRpcMsg fetchRpc = {0}; SRpcMsg dropRpc = {0}; - SRpcMsg statusRpc = {0}; qwtInitLogFile(); qwtBuildQueryReqMsg(&queryRpc); - qwtBuildReadyReqMsg(&qwtreadyMsg, &readyRpc); qwtBuildFetchReqMsg(&qwtfetchMsg, &fetchRpc); qwtBuildDropReqMsg(&qwtdropMsg, &dropRpc); @@ -964,20 +878,16 @@ TEST(seqTest, normalCase) { code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb); ASSERT_EQ(code, 0); - code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc); + code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0); ASSERT_EQ(code, 0); //code = qWorkerProcessReadyMsg(mockPointer, mgmt, &readyRpc); //ASSERT_EQ(code, 0); - code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc); - ASSERT_EQ(code, 0); - - code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc); + code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc, 0); ASSERT_EQ(code, 0); - qwtBuildStatusReqMsg(&qwtstatusMsg, &statusRpc); - code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc); + code = 
qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0); ASSERT_EQ(code, 0); qWorkerDestroy(&mgmt); @@ -989,13 +899,11 @@ TEST(seqTest, cancelFirst) { void *mockPointer = (void *)0x1; SRpcMsg queryRpc = {0}; SRpcMsg dropRpc = {0}; - SRpcMsg statusRpc = {0}; qwtInitLogFile(); qwtBuildQueryReqMsg(&queryRpc); qwtBuildDropReqMsg(&qwtdropMsg, &dropRpc); - qwtBuildStatusReqMsg(&qwtstatusMsg, &statusRpc); stubSetStringToPlan(); stubSetRpcSendResponse(); @@ -1006,24 +914,12 @@ TEST(seqTest, cancelFirst) { code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb); ASSERT_EQ(code, 0); - qwtBuildStatusReqMsg(&qwtstatusMsg, &statusRpc); - code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc); - ASSERT_EQ(code, 0); - - code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc); + code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0); ASSERT_EQ(code, 0); - qwtBuildStatusReqMsg(&qwtstatusMsg, &statusRpc); - code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc); - ASSERT_EQ(code, 0); - - code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc); + code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0); ASSERT_TRUE(0 != code); - qwtBuildStatusReqMsg(&qwtstatusMsg, &statusRpc); - code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc); - ASSERT_EQ(code, 0); - qWorkerDestroy(&mgmt); } @@ -1063,7 +959,7 @@ TEST(seqTest, randCase) { if (r >= 0 && r < maxr/5) { printf("Query,%d\n", t++); qwtBuildQueryReqMsg(&queryRpc); - code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc); + code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0); } else if (r >= maxr/5 && r < maxr * 2/5) { //printf("Ready,%d\n", t++); //qwtBuildReadyReqMsg(&readyMsg, &readyRpc); @@ -1074,22 +970,19 @@ TEST(seqTest, randCase) { } else if (r >= maxr * 2/5 && r < maxr* 3/5) { printf("Fetch,%d\n", t++); qwtBuildFetchReqMsg(&fetchMsg, &fetchRpc); - code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc); + code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc, 0); if (qwtTestEnableSleep) { taosUsleep(1); } } else if (r >= maxr * 3/5 && r < maxr * 4/5) { printf("Drop,%d\n", t++); qwtBuildDropReqMsg(&dropMsg, &dropRpc); - code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc); + code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0); if (qwtTestEnableSleep) { taosUsleep(1); } } else if (r >= maxr * 4/5 && r < maxr-1) { printf("Status,%d\n", t++); - qwtBuildStatusReqMsg(&statusMsg, &statusRpc); - code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc); - ASSERT_EQ(code, 0); if (qwtTestEnableSleep) { taosUsleep(1); } @@ -1137,7 +1030,6 @@ TEST(seqTest, multithreadRand) { //taosThreadCreate(&(t2), &thattr, readyThread, NULL); taosThreadCreate(&(t3), &thattr, fetchThread, NULL); taosThreadCreate(&(t4), &thattr, dropThread, NULL); - taosThreadCreate(&(t5), &thattr, statusThread, NULL); taosThreadCreate(&(t6), &thattr, fetchQueueThread, mgmt); while (true) { diff --git a/source/libs/scalar/inc/sclInt.h b/source/libs/scalar/inc/sclInt.h index 9dbfeceb5940d4237ead01ff445529c2d7d447ac..1c2e4a358a2c256cf3ed577be568c2e93fe13cbe 100644 --- a/source/libs/scalar/inc/sclInt.h +++ b/source/libs/scalar/inc/sclInt.h @@ -51,7 +51,7 @@ typedef struct SScalarCtx { int32_t doConvertDataType(SValueNode* pValueNode, SScalarParam* out); SColumnInfoData* createColumnInfoData(SDataType* pType, int32_t numOfRows); -void sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode); +int32_t sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode); #define GET_PARAM_TYPE(_c) 
((_c)->columnData->info.type) #define GET_PARAM_BYTES(_c) ((_c)->columnData->info.bytes) diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index 4317ad325e7e0d7b468dd7929c1f4a7c9ff7c169..195ec8a57791062cbca0e4c1a39ccce1866a5095 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -3553,7 +3553,11 @@ EDealRes fltReviseRewriter(SNode** pNode, void* pContext) { return DEAL_RES_CONTINUE; } - sclConvertToTsValueNode(stat->precision, valueNode); + int32_t code = sclConvertToTsValueNode(stat->precision, valueNode); + if (code) { + stat->code = code; + return DEAL_RES_ERROR; + } return DEAL_RES_CONTINUE; } @@ -3687,7 +3691,7 @@ int32_t fltReviseNodes(SFilterInfo *pInfo, SNode** pNode, SFltTreeStat *pStat) { for (int32_t i = 0; i < nodeNum; ++i) { SValueNode *valueNode = *(SValueNode **)taosArrayGet(pStat->nodeList, i); - sclConvertToTsValueNode(pStat->precision, valueNode); + FLT_ERR_JRET(sclConvertToTsValueNode(pStat->precision, valueNode)); } _return: diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index 49ed3ab48bfc96a5f081e28fd806457c807958a9..d2436b9948f2cf7bfa15d061cdc9bbfdfefd6f08 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -20,17 +20,19 @@ int32_t scalarGetOperatorParamNum(EOperatorType type) { return 2; } -void sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode) { +int32_t sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode) { char *timeStr = valueNode->datum.p; - if (convertStringToTimestamp(valueNode->node.resType.type, valueNode->datum.p, precision, &valueNode->datum.i) != - TSDB_CODE_SUCCESS) { - valueNode->datum.i = 0; + int32_t code = convertStringToTimestamp(valueNode->node.resType.type, valueNode->datum.p, precision, &valueNode->datum.i); + if (code != TSDB_CODE_SUCCESS) { + return code; } taosMemoryFree(timeStr); valueNode->typeData = valueNode->datum.i; valueNode->node.resType.type = TSDB_DATA_TYPE_TIMESTAMP; valueNode->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes; + + return TSDB_CODE_SUCCESS; } @@ -182,6 +184,11 @@ int32_t sclCopyValueNodeValue(SValueNode *pNode, void **res) { int32_t sclInitParam(SNode* node, SScalarParam *param, SScalarCtx *ctx, int32_t *rowNum) { switch (nodeType(node)) { + case QUERY_NODE_LEFT_VALUE: { + SSDataBlock* pb = taosArrayGetP(ctx->pBlockList, 0); + param->numOfRows = pb->info.rows; + break; + } case QUERY_NODE_VALUE: { SValueNode *valueNode = (SValueNode *)node; @@ -541,6 +548,7 @@ EDealRes sclRewriteBasedOnOptr(SNode** pNode, SScalarCtx *ctx, EOperatorType opT EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) { SOperatorNode *node = (SOperatorNode *)*pNode; + int32_t code = 0; if (node->pLeft && (QUERY_NODE_VALUE == nodeType(node->pLeft))) { SValueNode *valueNode = (SValueNode *)node->pLeft; @@ -550,7 +558,11 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) { if (IS_STR_DATA_TYPE(valueNode->node.resType.type) && node->pRight && nodesIsExprNode(node->pRight) && ((SExprNode*)node->pRight)->resType.type == TSDB_DATA_TYPE_TIMESTAMP) { - sclConvertToTsValueNode(((SExprNode*)node->pRight)->resType.precision, valueNode); + code = sclConvertToTsValueNode(((SExprNode*)node->pRight)->resType.precision, valueNode); + if (code) { + ctx->code = code; + return DEAL_RES_ERROR; + } } } @@ -562,7 +574,11 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) { if (IS_STR_DATA_TYPE(valueNode->node.resType.type) && node->pLeft && 
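The signature change above is the heart of this scalar/filter fix: sclConvertToTsValueNode used to swallow a failed string-to-timestamp conversion by writing datum.i = 0 and carrying on, silently turning malformed literals into the epoch; it now returns the error, and both callers abort the node walk by stashing the code in the traversal context (stat->code / ctx->code) and returning DEAL_RES_ERROR. A self-contained sketch of that walker-abort pattern, with stand-in types rather than the real SNode machinery:

#include <stdint.h>

typedef enum { WALK_CONTINUE, WALK_ERROR } EWalkRes;

typedef struct {
  int32_t code;  /* first error observed during the tree walk */
} SWalkCtx;

/* Stand-in for a fallible per-node rewrite such as string->timestamp
 * conversion: returns 0 on success, a nonzero error code on failure. */
static int32_t rewriteOneNode(int64_t *node) {
  return (*node < 0) ? -1 : 0;
}

/* Walker callback in the post-patch style: record the first failure in
 * the shared context and stop the traversal, instead of substituting a
 * bogus default value and continuing. */
static EWalkRes walkCb(int64_t *node, SWalkCtx *ctx) {
  int32_t code = rewriteOneNode(node);
  if (code) {
    ctx->code = code;  /* surfaced to the caller once the walk unwinds */
    return WALK_ERROR;
  }
  return WALK_CONTINUE;
}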
nodesIsExprNode(node->pLeft) && ((SExprNode*)node->pLeft)->resType.type == TSDB_DATA_TYPE_TIMESTAMP) { - sclConvertToTsValueNode(((SExprNode*)node->pLeft)->resType.precision, valueNode); + code = sclConvertToTsValueNode(((SExprNode*)node->pLeft)->resType.precision, valueNode); + if (code) { + ctx->code = code; + return DEAL_RES_ERROR; + } } } @@ -845,7 +861,7 @@ EDealRes sclWalkTarget(SNode* pNode, SScalarCtx *ctx) { } EDealRes sclCalcWalker(SNode* pNode, void* pContext) { - if (QUERY_NODE_VALUE == nodeType(pNode) || QUERY_NODE_NODE_LIST == nodeType(pNode) || QUERY_NODE_COLUMN == nodeType(pNode)) { + if (QUERY_NODE_VALUE == nodeType(pNode) || QUERY_NODE_NODE_LIST == nodeType(pNode) || QUERY_NODE_COLUMN == nodeType(pNode)|| QUERY_NODE_LEFT_VALUE == nodeType(pNode)) { return DEAL_RES_CONTINUE; } diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index 45742189d5e0585d68730a1c2f9843ecf58688b6..6ee5f038d661d06090d74487531adabec4c9abf9 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -633,7 +633,7 @@ static int32_t doTrimFunction(SScalarParam *pInput, int32_t inputNum, SScalarPar continue; } - char *input = colDataGetData(pInput[0].columnData, i); + char *input = colDataGetData(pInputData, i); int32_t len = varDataLen(input); int32_t charLen = (type == TSDB_DATA_TYPE_VARCHAR) ? len : len / TSDB_NCHAR_SIZE; trimFn(input, output, type, charLen); @@ -707,6 +707,7 @@ int32_t substrFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { int16_t inputType = GET_PARAM_TYPE(&pInput[0]); + int16_t inputLen = GET_PARAM_BYTES(&pInput[0]); int16_t outputType = GET_PARAM_TYPE(&pOutput[0]); int64_t outputLen = GET_PARAM_BYTES(&pOutput[0]); @@ -718,15 +719,15 @@ int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp colDataAppendNULL(pOutput->columnData, i); continue; } + char *input = colDataGetData(pInput[0].columnData, i); switch(outputType) { case TSDB_DATA_TYPE_BIGINT: { if (inputType == TSDB_DATA_TYPE_BINARY) { - memcpy(output, varDataVal(input), varDataLen(input)); - *(int64_t *)output = taosStr2Int64(output, NULL, 10); + *(int64_t *)output = taosStr2Int64(varDataVal(input), NULL, 10); } else if (inputType == TSDB_DATA_TYPE_NCHAR) { - char *newBuf = taosMemoryCalloc(1, outputLen * TSDB_NCHAR_SIZE + 1); + char *newBuf = taosMemoryCalloc(1, inputLen); int32_t len = taosUcs4ToMbs((TdUcs4 *)varDataVal(input), varDataLen(input), newBuf); if (len < 0) { taosMemoryFree(newBuf); @@ -742,10 +743,9 @@ int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp } case TSDB_DATA_TYPE_UBIGINT: { if (inputType == TSDB_DATA_TYPE_BINARY) { - memcpy(output, varDataVal(input), varDataLen(input)); - *(uint64_t *)output = taosStr2UInt64(output, NULL, 10); + *(uint64_t *)output = taosStr2UInt64(varDataVal(input), NULL, 10); } else if (inputType == TSDB_DATA_TYPE_NCHAR) { - char *newBuf = taosMemoryCalloc(1, outputLen * TSDB_NCHAR_SIZE + 1); + char *newBuf = taosMemoryCalloc(1, inputLen); int32_t len = taosUcs4ToMbs((TdUcs4 *)varDataVal(input), varDataLen(input), newBuf); if (len < 0) { taosMemoryFree(newBuf); @@ -824,7 +824,7 @@ int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp } //for constant conversion, need to set proper length of pOutput description if (len < outputLen) { - pOutput->columnData->info.bytes = len; + pOutput->columnData->info.bytes = len + VARSTR_HEADER_SIZE; } 
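Two related buffer fixes in castFunction are easy to miss in the noise: string-to-integer casts now parse the var-data payload directly instead of first memcpy'ing it into the fixed-width numeric output cell (which a long input could overflow), and the NCHAR scratch buffer is sized from the input column's byte width rather than from the numeric output type. (The same hunk also counts the var-string header when shrinking a constant's descriptor: len + VARSTR_HEADER_SIZE.) The sizing argument, as a commented sketch; the bound is mine, inferred from UCS-4/UTF-8 widths, not stated in the patch:

#include <stdint.h>
#include <stdlib.h>

/* A UCS-4 payload spends exactly 4 bytes per code point and UTF-8 at
 * most 4, so a multibyte rendering never exceeds the UCS-4 payload's
 * size. Sizing the scratch buffer from the *input* column width (what
 * GET_PARAM_BYTES yields, var header included) is therefore a safe upper
 * bound; deriving it from the *output* type, 8 bytes for BIGINT as the
 * old code effectively did, was not. */
static char *allocCastScratch(int32_t inputColBytes) {
  return (char *)calloc(1, (size_t)inputColBytes);
}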
break; } @@ -893,7 +893,7 @@ int32_t toISO8601Function(SScalarParam *pInput, int32_t inputNum, SScalarParam * memmove(tzInfo + fracLen, tzInfo, strlen(tzInfo)); } - char tmp[32]; + char tmp[32] = {0}; sprintf(tmp, ".%s", fraction); memcpy(tzInfo, tmp, fracLen); len += fracLen; @@ -925,10 +925,9 @@ int32_t toUnixtimestampFunction(SScalarParam *pInput, int32_t inputNum, SScalarP int32_t ret = convertStringToTimestamp(type, input, timePrec, &timeVal); if (ret != TSDB_CODE_SUCCESS) { colDataAppendNULL(pOutput->columnData, i); - continue; + } else { + colDataAppend(pOutput->columnData, i, (char *)&timeVal, false); } - - colDataAppend(pOutput->columnData, i, (char *)&timeVal, false); } pOutput->numOfRows = pInput->numOfRows; diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c index 145ed69a775a2a42bc04664b012ba1efe3995bdb..0fb3712c30bb349a406a92f14346370f80112ae6 100644 --- a/source/libs/scalar/src/sclvector.c +++ b/source/libs/scalar/src/sclvector.c @@ -1333,6 +1333,22 @@ void vectorMathMinus(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pO doReleaseVec(pLeftCol, leftConvert); } +void vectorAssign(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, int32_t _ord) { + SColumnInfoData *pOutputCol = pOut->columnData; + + pOut->numOfRows = pLeft->numOfRows; + + if (colDataIsNull_s(pRight->columnData, 0)) { + for (int32_t i = 0; i < pOut->numOfRows; ++i) { + colDataAppend(pOutputCol, i, NULL, true); + } + } else { + for (int32_t i = 0; i < pOut->numOfRows; ++i) { + colDataAppend(pOutputCol, i, colDataGetData(pRight->columnData, 0), false); + } + } +} + void vectorConcat(SScalarParam* pLeft, SScalarParam* pRight, void *out, int32_t _ord) { #if 0 int32_t len = pLeft->bytes + pRight->bytes; @@ -1691,6 +1707,8 @@ _bin_scalar_fn_t getBinScalarOperatorFn(int32_t binFunctionId) { return vectorMathRemainder; case OP_TYPE_MINUS: return vectorMathMinus; + case OP_TYPE_ASSIGN: + return vectorAssign; case OP_TYPE_GREATER_THAN: return vectorGreater; case OP_TYPE_GREATER_EQUAL: diff --git a/source/libs/scalar/test/scalar/CMakeLists.txt b/source/libs/scalar/test/scalar/CMakeLists.txt index 15d1c2cb4424fded0b04d1c82504768d57b21807..672cb5a3de39bfed51c9d399ac3d0431614f50ab 100644 --- a/source/libs/scalar/test/scalar/CMakeLists.txt +++ b/source/libs/scalar/test/scalar/CMakeLists.txt @@ -8,7 +8,7 @@ AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) ADD_EXECUTABLE(scalarTest ${SOURCE_LIST}) TARGET_LINK_LIBRARIES( scalarTest - PUBLIC os util common gtest qcom function nodes scalar parser + PUBLIC os util common gtest qcom function nodes scalar parser catalog transport ) TARGET_INCLUDE_DIRECTORIES( @@ -17,7 +17,9 @@ TARGET_INCLUDE_DIRECTORIES( PUBLIC "${TD_SOURCE_DIR}/source/libs/parser/inc" PRIVATE "${TD_SOURCE_DIR}/source/libs/scalar/inc" ) -add_test( - NAME scalarTest - COMMAND scalarTest -) +if(NOT TD_WINDOWS) + add_test( + NAME scalarTest + COMMAND scalarTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/libs/scalar/test/scalar/scalarTests.cpp b/source/libs/scalar/test/scalar/scalarTests.cpp index fb67695e8983f32973fdbe1dafa675082df5c81f..3fafc83b18365d490003a792748606c8d4fce804 100644 --- a/source/libs/scalar/test/scalar/scalarTests.cpp +++ b/source/libs/scalar/test/scalar/scalarTests.cpp @@ -1089,7 +1089,7 @@ void makeCalculate(void *json, void *key, int32_t rightType, void *rightData, do }else if(opType == OP_TYPE_ADD || opType == OP_TYPE_SUB || opType == OP_TYPE_MULTI || opType == OP_TYPE_DIV || opType == OP_TYPE_MOD || opType == OP_TYPE_MINUS){ 
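vectorAssign, added above and wired to OP_TYPE_ASSIGN in getBinScalarOperatorFn, treats the right operand as a one-row scalar and broadcasts it across every output row, NULL included. The semantics in miniature, with toy columns in place of SColumnInfoData:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  bool    isNull;
  int64_t v;
} Scalar;

/* Broadcast rule: a NULL scalar makes every output row NULL; otherwise
 * the scalar's single value is replicated into all rows. */
static void assignBroadcast(int64_t *out, bool *outNull, int32_t rows, Scalar rhs) {
  for (int32_t i = 0; i < rows; ++i) {
    outNull[i] = rhs.isNull;
    if (!rhs.isNull) {
      out[i] = rhs.v;
    }
  }
}

int main(void) {
  int64_t out[3];
  bool    null[3];
  assignBroadcast(out, null, 3, (Scalar){.isNull = false, .v = 42});
  printf("%lld %lld %lld\n", (long long)out[0], (long long)out[1], (long long)out[2]);  /* 42 42 42 */
  return 0;
}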
printf("1result:%f,except:%f\n", *((double *)colDataGetData(column, 0)), exceptValue); - ASSERT_TRUE(abs(*((double *)colDataGetData(column, 0)) - exceptValue) < 1e-15); + ASSERT_TRUE(fabs(*((double *)colDataGetData(column, 0)) - exceptValue) < 0.0001); }else if(opType == OP_TYPE_BIT_AND || opType == OP_TYPE_BIT_OR){ printf("2result:%ld,except:%f\n", *((int64_t *)colDataGetData(column, 0)), exceptValue); ASSERT_EQ(*((int64_t *)colDataGetData(column, 0)), exceptValue); @@ -1107,8 +1107,10 @@ void makeCalculate(void *json, void *key, int32_t rightType, void *rightData, do TEST(columnTest, json_column_arith_op) { scltInitLogFile(); - char *rightv= "{\"k1\":4,\"k2\":\"hello\",\"k3\":null,\"k4\":true,\"k5\":5.44}"; + char *rightvTmp= "{\"k1\":4,\"k2\":\"hello\",\"k3\":null,\"k4\":true,\"k5\":5.44}"; + char rightv[256] = {0}; + memcpy(rightv, rightvTmp, strlen(rightvTmp)); SKVRowBuilder kvRowBuilder; tdInitKVRowBuilder(&kvRowBuilder); parseJsontoTagData(rightv, &kvRowBuilder, NULL, 0); @@ -1189,8 +1191,10 @@ void *prepareNchar(char* rightData){ TEST(columnTest, json_column_logic_op) { scltInitLogFile(); - char *rightv= "{\"k1\":4,\"k2\":\"hello\",\"k3\":null,\"k4\":true,\"k5\":5.44,\"k6\":\"6.6hello\"}"; + char *rightvTmp= "{\"k1\":4,\"k2\":\"hello\",\"k3\":null,\"k4\":true,\"k5\":5.44,\"k6\":\"6.6hello\"}"; + char rightv[256] = {0}; + memcpy(rightv, rightvTmp, strlen(rightvTmp)); SKVRowBuilder kvRowBuilder; tdInitKVRowBuilder(&kvRowBuilder); parseJsontoTagData(rightv, &kvRowBuilder, NULL, 0); diff --git a/source/libs/scheduler/inc/schedulerInt.h b/source/libs/scheduler/inc/schedulerInt.h index be92de774b8964c975360467600f570c2540c907..3302a4b61ad29c5806430c3bf2b13a82bf16ff1e 100644 --- a/source/libs/scheduler/inc/schedulerInt.h +++ b/source/libs/scheduler/inc/schedulerInt.h @@ -39,9 +39,14 @@ enum { SCH_WRITE, }; +enum { + SCH_EXEC_CB = 1, + SCH_FETCH_CB, +}; + typedef struct SSchTrans { - void *transInst; - void *transHandle; + void *pTrans; + void *pHandle; } SSchTrans; typedef struct SSchHbTrans { @@ -74,12 +79,19 @@ typedef struct SSchJobStat { } SSchJobStat; -typedef struct SSchedulerStat { +typedef struct SSchStat { SSchApiStat api; SSchRuntimeStat runtime; SSchJobStat job; -} SSchedulerStat; +} SSchStat; +typedef struct SSchResInfo { + SQueryResult* queryRes; + void** fetchRes; + schedulerExecCallback execFp; + schedulerFetchCallback fetchFp; + void* userParam; +} SSchResInfo; typedef struct SSchedulerMgmt { uint64_t taskId; // sequential taksId @@ -89,7 +101,7 @@ typedef struct SSchedulerMgmt { bool exit; int32_t jobRef; int32_t jobNum; - SSchedulerStat stat; + SSchStat stat; SHashObj *hbConnections; } SSchedulerMgmt; @@ -108,7 +120,7 @@ typedef struct SSchTaskCallbackParam { typedef struct SSchHbCallbackParam { SSchCallbackParamHeader head; SQueryNodeEpId nodeEpId; - void *transport; + void *pTrans; } SSchHbCallbackParam; typedef struct SSchFlowControl { @@ -132,7 +144,7 @@ typedef struct SSchLevel { int32_t taskSucceed; int32_t taskNum; int32_t taskLaunchedNum; - SHashObj *flowCtrl; // key is ep, element is SSchFlowControl + int32_t taskDoneNum; SArray *subTasks; // Element is SQueryTask } SSchLevel; @@ -170,16 +182,18 @@ typedef struct SSchJob { SSchJobAttr attr; int32_t levelNum; int32_t taskNum; - void *transport; + void *pTrans; SArray *nodeList; // qnode/vnode list, SArray SArray *levels; // starting from 0. 
SArray SNodeList *subPlans; // subplan pointer copied from DAG, no need to free it in scheduler + SArray *dataSrcTasks; // SArray int32_t levelIdx; SEpSet dataSrcEps; SHashObj *execTasks; // executing tasks, key:taskid, value:SQueryTask* SHashObj *succTasks; // succeed tasks, key:taskid, value:SQueryTask* SHashObj *failTasks; // failed tasks, key:taskid, value:SQueryTask* + SHashObj *flowCtrl; // key is ep, element is SSchFlowControl SExplainCtx *explainCtx; int8_t status; @@ -189,18 +203,19 @@ typedef struct SSchJob { int32_t remoteFetch; SSchTask *fetchTask; int32_t errCode; - SArray *errList; // SArray SRWLatch resLock; void *queryRes; void *resData; //TODO free it or not int32_t resNumOfRows; + SSchResInfo userRes; const char *sql; + int32_t userCb; SQueryProfileSummary summary; } SSchJob; extern SSchedulerMgmt schMgmt; -#define SCH_TASK_READY_TO_LUNCH(readyNum, task) ((readyNum) >= taosArrayGetSize((task)->children)) +#define SCH_TASK_READY_FOR_LAUNCH(readyNum, task) ((readyNum) >= taosArrayGetSize((task)->children)) #define SCH_TASK_ID(_task) ((_task) ? (_task)->taskId : -1) #define SCH_SET_TASK_LASTMSG_TYPE(_task, _type) do { if(_task) { atomic_store_32(&(_task)->lastMsgType, _type); } } while (0) @@ -223,7 +238,7 @@ extern SSchedulerMgmt schMgmt; #define SCH_SET_JOB_NEED_FLOW_CTRL(_job) (_job)->attr.needFlowCtrl = true #define SCH_JOB_NEED_FLOW_CTRL(_job) ((_job)->attr.needFlowCtrl) -#define SCH_TASK_NEED_FLOW_CTRL(_job, _task) (SCH_IS_DATA_SRC_QRY_TASK(_task) && SCH_JOB_NEED_FLOW_CTRL(_job) && SCH_IS_LEAF_TASK(_job, _task) && SCH_IS_LEVEL_UNFINISHED((_task)->level)) +#define SCH_TASK_NEED_FLOW_CTRL(_job, _task) (SCH_IS_DATA_SRC_QRY_TASK(_task) && SCH_JOB_NEED_FLOW_CTRL(_job) && SCH_IS_LEVEL_UNFINISHED((_task)->level)) #define SCH_SET_JOB_TYPE(_job, type) (_job)->attr.queryJob = ((type) != SUBPLAN_TYPE_MODIFY) #define SCH_IS_QUERY_JOB(_job) ((_job)->attr.queryJob) @@ -261,8 +276,8 @@ int32_t schLaunchTask(SSchJob *job, SSchTask *task); int32_t schBuildAndSendMsg(SSchJob *job, SSchTask *task, SQueryNodeAddr *addr, int32_t msgType); SSchJob *schAcquireJob(int64_t refId); int32_t schReleaseJob(int64_t refId); -void schFreeFlowCtrl(SSchLevel *pLevel); -int32_t schCheckJobNeedFlowCtrl(SSchJob *pJob, SSchLevel *pLevel); +void schFreeFlowCtrl(SSchJob *pJob); +int32_t schChkJobNeedFlowCtrl(SSchJob *pJob, SSchLevel *pLevel); int32_t schDecTaskFlowQuota(SSchJob *pJob, SSchTask *pTask); int32_t schCheckIncTaskFlowQuota(SSchJob *pJob, SSchTask *pTask, bool *enough); int32_t schLaunchTasksInFlowCtrlList(SSchJob *pJob, SSchTask *pTask); @@ -273,6 +288,38 @@ int32_t schBuildAndSendHbMsg(SQueryNodeEpId *nodeEpId); int32_t schCloneSMsgSendInfo(void *src, void **dst); int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob); void schFreeJobImpl(void *job); +int32_t schMakeHbCallbackParam(SSchJob *pJob, SSchTask *pTask, void **pParam); +int32_t schMakeHbRpcCtx(SSchJob *pJob, SSchTask *pTask, SRpcCtx *pCtx); +int32_t schEnsureHbConnection(SSchJob *pJob, SSchTask *pTask); +int32_t schUpdateHbConnection(SQueryNodeEpId *epId, SSchTrans *trans); +int32_t schHandleHbCallback(void *param, const SDataBuf *pMsg, int32_t code); +void schFreeRpcCtx(SRpcCtx *pCtx); +int32_t schGetCallbackFp(int32_t msgType, __async_send_cb_fn_t *fp); +bool schJobNeedToStop(SSchJob *pJob, int8_t *pStatus); +int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask); +int32_t schSaveJobQueryRes(SSchJob *pJob, SQueryTableRsp *rsp); +int32_t schProcessOnExplainDone(SSchJob *pJob, SSchTask *pTask, SRetrieveTableRsp 
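The new SSchResInfo captured in the job (userRes, above) is what lets the scheduler run the same job machinery synchronously or asynchronously: the caller's exec/fetch callbacks and an opaque user param are stored at submit time and fired on completion. A minimal stand-alone sketch of that shape; the typedef and field names below are illustrative stand-ins, only SSchResInfo's own members come from the patch:

#include <stdint.h>
#include <stdio.h>

/* Illustrative completion-callback plumbing in the SSchResInfo style:
 * store the user's callback and param with the job, invoke on finish. */
typedef void (*ExecCb)(int32_t code, void *userParam);

typedef struct {
  ExecCb execFp;
  void  *userParam;
} ResInfo;

static void onJobDone(const ResInfo *res, int32_t code) {
  if (res->execFp) {
    res->execFp(code, res->userParam);  /* notify the submitter */
  }
}

static void myCb(int32_t code, void *param) {
  printf("job tagged '%s' finished, code:%d\n", (const char *)param, code);
}

int main(void) {
  ResInfo res = {.execFp = myCb, .userParam = (void *)"demo"};
  onJobDone(&res, 0);
  return 0;
}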
*pRsp); +void schProcessOnDataFetched(SSchJob *job); +int32_t schGetTaskFromTaskList(SHashObj *pTaskList, uint64_t taskId, SSchTask **pTask); +int32_t schUpdateTaskExecNodeHandle(SSchTask *pTask, void *handle, int32_t rspCode); +void schFreeRpcCtxVal(const void *arg); +int32_t schMakeBrokenLinkVal(SSchJob *pJob, SSchTask *pTask, SRpcBrokenlinkVal *brokenVal, bool isHb); +int32_t schRecordTaskExecNode(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr, void *handle); +int32_t schExecStaticExplainJob(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *job, const char *sql, + SSchResInfo *pRes, bool sync); +int32_t schExecJobImpl(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *job, const char *sql, + SSchResInfo *pRes, int64_t startTs, bool sync); +int32_t schChkUpdateJobStatus(SSchJob *pJob, int8_t newStatus); +int32_t schCancelJob(SSchJob *pJob); +int32_t schProcessOnJobDropped(SSchJob *pJob, int32_t errCode); +uint64_t schGenTaskId(void); +void schCloseJobRef(void); +int32_t schExecJob(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql, + int64_t startTs, SSchResInfo *pRes); +int32_t schAsyncExecJob(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql, + int64_t startTs, SSchResInfo *pRes); +int32_t schFetchRows(SSchJob *pJob); +int32_t schAsyncFetchRows(SSchJob *pJob); #ifdef __cplusplus diff --git a/source/libs/scheduler/src/schDbg.c b/source/libs/scheduler/src/schDbg.c new file mode 100644 index 0000000000000000000000000000000000000000..4b5f74114d2ae7d4ec47b09f8a48da2f3f61de8d --- /dev/null +++ b/source/libs/scheduler/src/schDbg.c @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "query.h" +#include "schedulerInt.h" + +tsem_t schdRspSem; + +void schdExecCallback(SQueryResult* pResult, void* param, int32_t code) { + if (code) { + pResult->code = code; + } + + *(SQueryResult*)param = *pResult; + + taosMemoryFree(pResult); + + tsem_post(&schdRspSem); +} + +void schdFetchCallback(void* pResult, void* param, int32_t code) { + SSchdFetchParam* fParam = (SSchdFetchParam*)param; + + *fParam->pData = pResult; + *fParam->code = code; + + tsem_post(&schdRspSem); +} + + diff --git a/source/libs/scheduler/src/schFlowCtrl.c b/source/libs/scheduler/src/schFlowCtrl.c index 993521da8722afd2363e47d65cc48b51526968e6..85d205f5f2eba00a2f6bd741b891b26f30488d05 100644 --- a/source/libs/scheduler/src/schFlowCtrl.c +++ b/source/libs/scheduler/src/schFlowCtrl.c @@ -19,13 +19,13 @@ #include "catalog.h" #include "tref.h" -void schFreeFlowCtrl(SSchLevel *pLevel) { - if (NULL == pLevel->flowCtrl) { +void schFreeFlowCtrl(SSchJob *pJob) { + if (NULL == pJob->flowCtrl) { return; } SSchFlowControl *ctrl = NULL; - void *pIter = taosHashIterate(pLevel->flowCtrl, NULL); + void *pIter = taosHashIterate(pJob->flowCtrl, NULL); while (pIter) { ctrl = (SSchFlowControl *)pIter; @@ -33,23 +33,23 @@ void schFreeFlowCtrl(SSchLevel *pLevel) { taosArrayDestroy(ctrl->taskList); } - pIter = taosHashIterate(pLevel->flowCtrl, pIter); + pIter = taosHashIterate(pJob->flowCtrl, pIter); } - taosHashCleanup(pLevel->flowCtrl); - pLevel->flowCtrl = NULL; + taosHashCleanup(pJob->flowCtrl); + pJob->flowCtrl = NULL; } -int32_t schCheckJobNeedFlowCtrl(SSchJob *pJob, SSchLevel *pLevel) { +int32_t schChkJobNeedFlowCtrl(SSchJob *pJob, SSchLevel *pLevel) { if (!SCH_IS_QUERY_JOB(pJob)) { SCH_JOB_DLOG("job no need flow ctrl, queryJob:%d", SCH_IS_QUERY_JOB(pJob)); return TSDB_CODE_SUCCESS; } int32_t sum = 0; - - for (int32_t i = 0; i < pLevel->taskNum; ++i) { - SSchTask *pTask = taosArrayGet(pLevel->subTasks, i); + int32_t taskNum = taosArrayGetSize(pJob->dataSrcTasks); + for (int32_t i = 0; i < taskNum; ++i) { + SSchTask *pTask = *(SSchTask **)taosArrayGet(pJob->dataSrcTasks, i); sum += pTask->plan->execNodeStat.tableNum; } @@ -59,9 +59,9 @@ int32_t schCheckJobNeedFlowCtrl(SSchJob *pJob, SSchLevel *pLevel) { return TSDB_CODE_SUCCESS; } - pLevel->flowCtrl = taosHashInit(pLevel->taskNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK); - if (NULL == pLevel->flowCtrl) { - SCH_JOB_ELOG("taosHashInit %d flowCtrl failed", pLevel->taskNum); + pJob->flowCtrl = taosHashInit(pJob->taskNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK); + if (NULL == pJob->flowCtrl) { + SCH_JOB_ELOG("taosHashInit %d flowCtrl failed", pJob->taskNum); SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); } @@ -78,7 +78,7 @@ int32_t schDecTaskFlowQuota(SSchJob *pJob, SSchTask *pTask) { int32_t code = 0; SEp *ep = SCH_GET_CUR_EP(&pTask->plan->execNode); - ctrl = (SSchFlowControl *)taosHashGet(pLevel->flowCtrl, ep, sizeof(SEp)); + ctrl = (SSchFlowControl *)taosHashGet(pJob->flowCtrl, ep, sizeof(SEp)); if (NULL == ctrl) { SCH_TASK_ELOG("taosHashGet node from flowCtrl failed, fqdn:%s, port:%d", ep->fqdn, ep->port); SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); @@ -110,11 +110,11 @@ int32_t schCheckIncTaskFlowQuota(SSchJob *pJob, SSchTask *pTask, bool *enough) { SEp *ep = SCH_GET_CUR_EP(&pTask->plan->execNode); do { - ctrl = (SSchFlowControl *)taosHashGet(pLevel->flowCtrl, ep, sizeof(SEp)); + ctrl = (SSchFlowControl *)taosHashGet(pJob->flowCtrl, ep, sizeof(SEp)); if (NULL == ctrl) { SSchFlowControl nctrl = 
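The new schDbg.c above is the synchronous shim over these async callbacks: each callback copies the result out through the param pointer and posts schdRspSem, so a debug caller can block until completion. A self-contained POSIX analogue of the same pattern (names are mine; the entry points that would invoke the callback are not part of this file):

#include <semaphore.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  int32_t code;
} QueryResult;

static sem_t rspSem;

/* Completion callback in the schdExecCallback mold: hand the result to
 * the waiting thread through `param`, then wake it with the semaphore. */
static void execCb(QueryResult *result, void *param, int32_t code) {
  if (code) {
    result->code = code;
  }
  *(QueryResult *)param = *result;
  sem_post(&rspSem);
}

int main(void) {
  QueryResult res = {0};
  QueryResult engineRes = {0};  /* stand-in for the engine's result object */

  sem_init(&rspSem, 0, 0);
  execCb(&engineRes, &res, 0);  /* in real use, the engine calls this */
  sem_wait(&rspSem);            /* caller blocks until the callback fires */

  printf("code:%d\n", res.code);
  return 0;
}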
{.tableNumSum = pTask->plan->execNodeStat.tableNum, .execTaskNum = 1}; - code = taosHashPut(pLevel->flowCtrl, ep, sizeof(SEp), &nctrl, sizeof(nctrl)); + code = taosHashPut(pJob->flowCtrl, ep, sizeof(SEp), &nctrl, sizeof(nctrl)); if (code) { if (HASH_NODE_EXIST(code)) { continue; @@ -273,10 +273,9 @@ int32_t schLaunchTasksInFlowCtrlList(SSchJob *pJob, SSchTask *pTask) { SCH_ERR_RET(schDecTaskFlowQuota(pJob, pTask)); - SSchLevel *pLevel = pTask->level; SEp *ep = SCH_GET_CUR_EP(&pTask->plan->execNode); - SSchFlowControl *ctrl = (SSchFlowControl *)taosHashGet(pLevel->flowCtrl, ep, sizeof(SEp)); + SSchFlowControl *ctrl = (SSchFlowControl *)taosHashGet(pJob->flowCtrl, ep, sizeof(SEp)); if (NULL == ctrl) { SCH_TASK_ELOG("taosHashGet node from flowCtrl failed, fqdn:%s, port:%d", ep->fqdn, ep->port); SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); diff --git a/source/libs/scheduler/src/schJob.c b/source/libs/scheduler/src/schJob.c new file mode 100644 index 0000000000000000000000000000000000000000..e5aa2bd523404c669e575feb913c16d2de7ac84e --- /dev/null +++ b/source/libs/scheduler/src/schJob.c @@ -0,0 +1,1542 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "catalog.h" +#include "command.h" +#include "query.h" +#include "schedulerInt.h" +#include "tmsg.h" +#include "tref.h" +#include "trpc.h" + +FORCE_INLINE SSchJob *schAcquireJob(int64_t refId) { return (SSchJob *)taosAcquireRef(schMgmt.jobRef, refId); } + +FORCE_INLINE int32_t schReleaseJob(int64_t refId) { return taosReleaseRef(schMgmt.jobRef, refId); } + +int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *pLevel) { + pTask->plan = pPlan; + pTask->level = pLevel; + SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_NOT_START); + pTask->taskId = schGenTaskId(); + pTask->execNodes = taosArrayInit(SCH_MAX_CANDIDATE_EP_NUM, sizeof(SSchNodeInfo)); + if (NULL == pTask->execNodes) { + SCH_TASK_ELOG("taosArrayInit %d execNodes failed", SCH_MAX_CANDIDATE_EP_NUM); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t schInitJob(SSchJob **pSchJob, SQueryPlan *pDag, void *pTrans, SArray *pNodeList, const char *sql, + SSchResInfo *pRes, int64_t startTs, bool syncSchedule) { + int32_t code = 0; + int64_t refId = -1; + SSchJob *pJob = taosMemoryCalloc(1, sizeof(SSchJob)); + if (NULL == pJob) { + qError("QID:%" PRIx64 " calloc %d failed", pDag->queryId, (int32_t)sizeof(SSchJob)); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + pJob->attr.explainMode = pDag->explainInfo.mode; + pJob->attr.syncSchedule = syncSchedule; + pJob->pTrans = pTrans; + pJob->sql = sql; + if (pRes) { + pJob->userRes = *pRes; + } + + if (pNodeList != NULL) { + pJob->nodeList = taosArrayDup(pNodeList); + } + + SCH_ERR_JRET(schValidateAndBuildJob(pDag, pJob)); + + if (SCH_IS_EXPLAIN_JOB(pJob)) { + SCH_ERR_JRET(qExecExplainBegin(pDag, &pJob->explainCtx, startTs)); + } + + pJob->execTasks = + taosHashInit(pDag->numOfSubplans, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, 
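Flow control also moves from per-level to per-job in these hunks: schChkJobNeedFlowCtrl now sums execNodeStat.tableNum over the job-wide dataSrcTasks array and keeps a single flowCtrl hash on SSchJob, so one decision and one endpoint table cover the whole job. The decision in miniature; the threshold constant below is a placeholder, the real cutoff is configuration-driven and not visible in this hunk:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLOW_CTRL_TABLE_THRESHOLD 100000  /* placeholder cutoff */

typedef struct {
  int32_t tableNum;  /* execNodeStat.tableNum in the real task */
} DataSrcTask;

/* Sum table counts over every data-source task in the job and decide
 * once whether endpoint-level flow control is worth the bookkeeping. */
static bool jobNeedsFlowCtrl(const DataSrcTask *tasks, int32_t n) {
  int64_t sum = 0;
  for (int32_t i = 0; i < n; ++i) {
    sum += tasks[i].tableNum;
  }
  return sum >= FLOW_CTRL_TABLE_THRESHOLD;
}

int main(void) {
  DataSrcTask tasks[] = {{60000}, {50000}};
  printf("need flow ctrl: %d\n", jobNeedsFlowCtrl(tasks, 2));  /* 1 */
  return 0;
}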
HASH_ENTRY_LOCK); + if (NULL == pJob->execTasks) { + SCH_JOB_ELOG("taosHashInit %d execTasks failed", pDag->numOfSubplans); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + pJob->succTasks = + taosHashInit(pDag->numOfSubplans, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_ENTRY_LOCK); + if (NULL == pJob->succTasks) { + SCH_JOB_ELOG("taosHashInit %d succTasks failed", pDag->numOfSubplans); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + pJob->failTasks = + taosHashInit(pDag->numOfSubplans, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_ENTRY_LOCK); + if (NULL == pJob->failTasks) { + SCH_JOB_ELOG("taosHashInit %d failTasks failed", pDag->numOfSubplans); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + tsem_init(&pJob->rspSem, 0, 0); + + refId = taosAddRef(schMgmt.jobRef, pJob); + if (refId < 0) { + SCH_JOB_ELOG("taosAddRef job failed, error:%s", tstrerror(terrno)); + SCH_ERR_JRET(terrno); + } + + atomic_add_fetch_32(&schMgmt.jobNum, 1); + + if (NULL == schAcquireJob(refId)) { + SCH_JOB_ELOG("schAcquireJob job failed, refId:%" PRIx64, refId); + SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR); + } + + pJob->refId = refId; + + SCH_JOB_DLOG("job refId:%" PRIx64, pJob->refId); + + pJob->status = JOB_TASK_STATUS_NOT_START; + + *pSchJob = pJob; + + return TSDB_CODE_SUCCESS; + +_return: + + if (refId < 0) { + schFreeJobImpl(pJob); + } else { + taosRemoveRef(schMgmt.jobRef, refId); + } + SCH_RET(code); +} + +void schFreeTask(SSchTask *pTask) { + if (pTask->candidateAddrs) { + taosArrayDestroy(pTask->candidateAddrs); + } + + taosMemoryFreeClear(pTask->msg); + + if (pTask->children) { + taosArrayDestroy(pTask->children); + } + + if (pTask->parents) { + taosArrayDestroy(pTask->parents); + } + + if (pTask->execNodes) { + taosArrayDestroy(pTask->execNodes); + } +} + +FORCE_INLINE bool schJobNeedToStop(SSchJob *pJob, int8_t *pStatus) { + int8_t status = SCH_GET_JOB_STATUS(pJob); + if (pStatus) { + *pStatus = status; + } + + return (status == JOB_TASK_STATUS_FAILED || status == JOB_TASK_STATUS_CANCELLED || + status == JOB_TASK_STATUS_CANCELLING || status == JOB_TASK_STATUS_DROPPING || + status == JOB_TASK_STATUS_SUCCEED); +} + +int32_t schChkUpdateJobStatus(SSchJob *pJob, int8_t newStatus) { + int32_t code = 0; + + int8_t oriStatus = 0; + + while (true) { + oriStatus = SCH_GET_JOB_STATUS(pJob); + + if (oriStatus == newStatus) { + SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); + } + + switch (oriStatus) { + case JOB_TASK_STATUS_NULL: + if (newStatus != JOB_TASK_STATUS_NOT_START) { + SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); + } + + break; + case JOB_TASK_STATUS_NOT_START: + if (newStatus != JOB_TASK_STATUS_EXECUTING) { + SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); + } + + break; + case JOB_TASK_STATUS_EXECUTING: + if (newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED && newStatus != JOB_TASK_STATUS_FAILED && + newStatus != JOB_TASK_STATUS_CANCELLING && newStatus != JOB_TASK_STATUS_CANCELLED && + newStatus != JOB_TASK_STATUS_DROPPING) { + SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); + } + + break; + case JOB_TASK_STATUS_PARTIAL_SUCCEED: + if (newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_SUCCEED && + newStatus != JOB_TASK_STATUS_DROPPING) { + SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); + } + + break; + case JOB_TASK_STATUS_SUCCEED: + case JOB_TASK_STATUS_FAILED: + case JOB_TASK_STATUS_CANCELLING: + if (newStatus != JOB_TASK_STATUS_DROPPING) { + SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); + } + + break; + case JOB_TASK_STATUS_CANCELLED: + case JOB_TASK_STATUS_DROPPING: + 
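schInitJob's tail above leans on the ref registry: taosAddRef stores the job with a destructor, schAcquireJob takes the caller's working reference, and the error path either frees the job directly (registration never happened) or lets taosRemoveRef drive the destructor. A minimal single-object analogue of that acquire/release discipline, with my own names:

#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct {
  _Atomic int32_t refCount;
  /* ... job payload ... */
} Job;

static Job *jobAcquire(Job *j) {
  atomic_fetch_add(&j->refCount, 1);
  return j;
}

/* Releasing the last reference runs the destructor, the role
 * schFreeJobImpl plays for the jobRef registry. */
static void jobRelease(Job *j) {
  if (atomic_fetch_sub(&j->refCount, 1) == 1) {
    free(j);
  }
}

int main(void) {
  Job *j = (Job *)calloc(1, sizeof(Job));
  jobAcquire(j);  /* registry's reference (taosAddRef's role)    */
  jobAcquire(j);  /* caller's reference   (schAcquireJob's role) */
  jobRelease(j);  /* caller done */
  jobRelease(j);  /* registry removal: object freed here */
  return 0;
}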
SCH_ERR_JRET(TSDB_CODE_QRY_JOB_FREED); + break; + + default: + SCH_JOB_ELOG("invalid job status:%s", jobTaskStatusStr(oriStatus)); + SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); + } + + if (oriStatus != atomic_val_compare_exchange_8(&pJob->status, oriStatus, newStatus)) { + continue; + } + + SCH_JOB_DLOG("job status updated from %s to %s", jobTaskStatusStr(oriStatus), jobTaskStatusStr(newStatus)); + + break; + } + + return TSDB_CODE_SUCCESS; + +_return: + + SCH_JOB_ELOG("invalid job status update, from %s to %s", jobTaskStatusStr(oriStatus), jobTaskStatusStr(newStatus)); + SCH_ERR_RET(code); + return TSDB_CODE_SUCCESS; +} + +int32_t schBuildTaskRalation(SSchJob *pJob, SHashObj *planToTask) { + for (int32_t i = 0; i < pJob->levelNum; ++i) { + SSchLevel *pLevel = taosArrayGet(pJob->levels, i); + + for (int32_t m = 0; m < pLevel->taskNum; ++m) { + SSchTask *pTask = taosArrayGet(pLevel->subTasks, m); + SSubplan *pPlan = pTask->plan; + int32_t childNum = pPlan->pChildren ? (int32_t)LIST_LENGTH(pPlan->pChildren) : 0; + int32_t parentNum = pPlan->pParents ? (int32_t)LIST_LENGTH(pPlan->pParents) : 0; + + if (childNum > 0) { + if (pJob->levelIdx == pLevel->level) { + SCH_JOB_ELOG("invalid query plan, lowest level, childNum:%d", childNum); + SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); + } + + pTask->children = taosArrayInit(childNum, POINTER_BYTES); + if (NULL == pTask->children) { + SCH_TASK_ELOG("taosArrayInit %d children failed", childNum); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + } + + for (int32_t n = 0; n < childNum; ++n) { + SSubplan *child = (SSubplan *)nodesListGetNode(pPlan->pChildren, n); + SSchTask **childTask = taosHashGet(planToTask, &child, POINTER_BYTES); + if (NULL == childTask || NULL == *childTask) { + SCH_TASK_ELOG("subplan children relationship error, level:%d, taskIdx:%d, childIdx:%d", i, m, n); + SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); + } + + if (NULL == taosArrayPush(pTask->children, childTask)) { + SCH_TASK_ELOG("taosArrayPush childTask failed, level:%d, taskIdx:%d, childIdx:%d", i, m, n); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + SCH_TASK_DLOG("children info, the %d child TID %" PRIx64, n, (*childTask)->taskId); + } + + if (parentNum > 0) { + if (0 == pLevel->level) { + SCH_TASK_ELOG("invalid task info, level:0, parentNum:%d", parentNum); + SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); + } + + pTask->parents = taosArrayInit(parentNum, POINTER_BYTES); + if (NULL == pTask->parents) { + SCH_TASK_ELOG("taosArrayInit %d parents failed", parentNum); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + } else { + if (0 != pLevel->level) { + SCH_TASK_ELOG("invalid task info, level:%d, parentNum:%d", pLevel->level, parentNum); + SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); + } + } + + for (int32_t n = 0; n < parentNum; ++n) { + SSubplan *parent = (SSubplan *)nodesListGetNode(pPlan->pParents, n); + SSchTask **parentTask = taosHashGet(planToTask, &parent, POINTER_BYTES); + if (NULL == parentTask || NULL == *parentTask) { + SCH_TASK_ELOG("subplan parent relationship error, level:%d, taskIdx:%d, childIdx:%d", i, m, n); + SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); + } + + if (NULL == taosArrayPush(pTask->parents, parentTask)) { + SCH_TASK_ELOG("taosArrayPush parentTask failed, level:%d, taskIdx:%d, childIdx:%d", i, m, n); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + SCH_TASK_DLOG("parents info, the %d parent TID %" PRIx64, n, (*parentTask)->taskId); + } + + SCH_TASK_DLOG("level:%d, parentNum:%d, childNum:%d", i, parentNum, childNum); + } + } + + SSchLevel *pLevel = 
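schChkUpdateJobStatus above pairs transition validation with a compare-and-swap on purpose: between reading the status and publishing the new one, another thread may have moved the job, so a failed CAS loops back and re-validates against the fresh value. The skeleton of that pattern, with an abbreviated illustrative transition table:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

enum { ST_NOT_START, ST_EXECUTING, ST_PARTIAL, ST_SUCCEED, ST_FAILED };

/* Abbreviated rules; the real function enumerates many more states. */
static bool transitionAllowed(int8_t from, int8_t to) {
  switch (from) {
    case ST_NOT_START: return to == ST_EXECUTING;
    case ST_EXECUTING: return to == ST_PARTIAL || to == ST_FAILED;
    case ST_PARTIAL:   return to == ST_SUCCEED || to == ST_FAILED;
    default:           return false;
  }
}

/* Validate against the currently observed status, then publish with a
 * CAS; on a race the CAS fails, `ori` is refreshed, and we re-validate. */
static int32_t updateStatus(_Atomic int8_t *status, int8_t newStatus) {
  int8_t ori = atomic_load(status);
  for (;;) {
    if (!transitionAllowed(ori, newStatus)) {
      return -1;  /* invalid transition from the state we actually saw */
    }
    if (atomic_compare_exchange_weak(status, &ori, newStatus)) {
      return 0;
    }
  }
}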
taosArrayGet(pJob->levels, 0); + if (SCH_IS_QUERY_JOB(pJob) && pLevel->taskNum > 1) { + SCH_JOB_ELOG("invalid query plan, level:0, taskNum:%d", pLevel->taskNum); + SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t schRecordTaskSucceedNode(SSchJob *pJob, SSchTask *pTask) { + SQueryNodeAddr *addr = taosArrayGet(pTask->candidateAddrs, pTask->candidateIdx); + if (NULL == addr) { + SCH_TASK_ELOG("taosArrayGet candidate addr failed, idx:%d, size:%d", pTask->candidateIdx, + (int32_t)taosArrayGetSize(pTask->candidateAddrs)); + SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); + } + + pTask->succeedAddr = *addr; + + return TSDB_CODE_SUCCESS; +} + +int32_t schRecordTaskExecNode(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr, void *handle) { + SSchNodeInfo nodeInfo = {.addr = *addr, .handle = handle}; + + if (NULL == taosArrayPush(pTask->execNodes, &nodeInfo)) { + SCH_TASK_ELOG("taosArrayPush nodeInfo to execNodes list failed, errno:%d", errno); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + SCH_TASK_DLOG("task execNode recorded, handle:%p", handle); + + return TSDB_CODE_SUCCESS; +} + +int32_t schRecordQueryDataSrc(SSchJob *pJob, SSchTask *pTask) { + if (!SCH_IS_DATA_SRC_QRY_TASK(pTask)) { + return TSDB_CODE_SUCCESS; + } + + taosArrayPush(pJob->dataSrcTasks, &pTask); + + return TSDB_CODE_SUCCESS; +} + + +int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob) { + int32_t code = 0; + pJob->queryId = pDag->queryId; + + if (pDag->numOfSubplans <= 0) { + SCH_JOB_ELOG("invalid subplan num:%d", pDag->numOfSubplans); + SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + } + + pJob->dataSrcTasks = taosArrayInit(pDag->numOfSubplans, POINTER_BYTES); + if (NULL == pJob->dataSrcTasks) { + SCH_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + + int32_t levelNum = (int32_t)LIST_LENGTH(pDag->pSubplans); + if (levelNum <= 0) { + SCH_JOB_ELOG("invalid level num:%d", levelNum); + SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + } + + SHashObj *planToTask = taosHashInit( + SCHEDULE_DEFAULT_MAX_TASK_NUM, + taosGetDefaultHashFunction(POINTER_BYTES == sizeof(int64_t) ? 
TSDB_DATA_TYPE_BIGINT : TSDB_DATA_TYPE_INT), false, + HASH_NO_LOCK); + if (NULL == planToTask) { + SCH_JOB_ELOG("taosHashInit %d failed", SCHEDULE_DEFAULT_MAX_TASK_NUM); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + pJob->levels = taosArrayInit(levelNum, sizeof(SSchLevel)); + if (NULL == pJob->levels) { + SCH_JOB_ELOG("taosArrayInit %d failed", levelNum); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + pJob->levelNum = levelNum; + pJob->levelIdx = levelNum - 1; + + pJob->subPlans = pDag->pSubplans; + + SSchLevel level = {0}; + SNodeListNode *plans = NULL; + int32_t taskNum = 0; + SSchLevel *pLevel = NULL; + + level.status = JOB_TASK_STATUS_NOT_START; + + for (int32_t i = 0; i < levelNum; ++i) { + if (NULL == taosArrayPush(pJob->levels, &level)) { + SCH_JOB_ELOG("taosArrayPush level failed, level:%d", i); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + pLevel = taosArrayGet(pJob->levels, i); + pLevel->level = i; + + plans = (SNodeListNode *)nodesListGetNode(pDag->pSubplans, i); + if (NULL == plans) { + SCH_JOB_ELOG("empty level plan, level:%d", i); + SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); + } + + taskNum = (int32_t)LIST_LENGTH(plans->pNodeList); + if (taskNum <= 0) { + SCH_JOB_ELOG("invalid level plan number:%d, level:%d", taskNum, i); + SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); + } + + pLevel->taskNum = taskNum; + + pLevel->subTasks = taosArrayInit(taskNum, sizeof(SSchTask)); + if (NULL == pLevel->subTasks) { + SCH_JOB_ELOG("taosArrayInit %d failed", taskNum); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + for (int32_t n = 0; n < taskNum; ++n) { + SSubplan *plan = (SSubplan *)nodesListGetNode(plans->pNodeList, n); + + SCH_SET_JOB_TYPE(pJob, plan->subplanType); + + SSchTask task = {0}; + SSchTask *pTask = &task; + + SCH_ERR_JRET(schInitTask(pJob, &task, plan, pLevel)); + + void *p = taosArrayPush(pLevel->subTasks, &task); + if (NULL == p) { + SCH_TASK_ELOG("taosArrayPush task to level failed, level:%d, taskIdx:%d", pLevel->level, n); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + SCH_ERR_JRET(schRecordQueryDataSrc(pJob, p)); + + if (0 != taosHashPut(planToTask, &plan, POINTER_BYTES, &p, POINTER_BYTES)) { + SCH_TASK_ELOG("taosHashPut to planToTaks failed, taskIdx:%d", n); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + ++pJob->taskNum; + } + + SCH_JOB_DLOG("level initialized, taskNum:%d", taskNum); + } + + SCH_ERR_JRET(schBuildTaskRalation(pJob, planToTask)); + +_return: + + if (planToTask) { + taosHashCleanup(planToTask); + } + + SCH_RET(code); +} + +int32_t schSetTaskCandidateAddrs(SSchJob *pJob, SSchTask *pTask) { + if (NULL != pTask->candidateAddrs) { + return TSDB_CODE_SUCCESS; + } + + pTask->candidateIdx = 0; + pTask->candidateAddrs = taosArrayInit(SCH_MAX_CANDIDATE_EP_NUM, sizeof(SQueryNodeAddr)); + if (NULL == pTask->candidateAddrs) { + SCH_TASK_ELOG("taosArrayInit %d condidate addrs failed", SCH_MAX_CANDIDATE_EP_NUM); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + if (pTask->plan->execNode.epSet.numOfEps > 0) { + if (NULL == taosArrayPush(pTask->candidateAddrs, &pTask->plan->execNode)) { + SCH_TASK_ELOG("taosArrayPush execNode to candidate addrs failed, errno:%d", errno); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + SCH_TASK_DLOG("use execNode from plan as candidate addr, numOfEps:%d", pTask->plan->execNode.epSet.numOfEps); + + return TSDB_CODE_SUCCESS; + } + + int32_t addNum = 0; + int32_t nodeNum = 0; + if (pJob->nodeList) { + nodeNum = taosArrayGetSize(pJob->nodeList); + + for (int32_t i = 0; i < nodeNum && addNum < 
SCH_MAX_CANDIDATE_EP_NUM; ++i) { + SQueryNodeAddr *naddr = taosArrayGet(pJob->nodeList, i); + + if (NULL == taosArrayPush(pTask->candidateAddrs, naddr)) { + SCH_TASK_ELOG("taosArrayPush execNode to candidate addrs failed, addNum:%d, errno:%d", addNum, errno); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + ++addNum; + } + } + + if (addNum <= 0) { + SCH_TASK_ELOG("no available execNode as candidates, nodeNum:%d", nodeNum); + SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + } + + /* + for (int32_t i = 0; i < job->dataSrcEps.numOfEps && addNum < SCH_MAX_CANDIDATE_EP_NUM; ++i) { + strncpy(epSet->fqdn[epSet->numOfEps], job->dataSrcEps.fqdn[i], sizeof(job->dataSrcEps.fqdn[i])); + epSet->port[epSet->numOfEps] = job->dataSrcEps.port[i]; + + ++epSet->numOfEps; + } + */ + + return TSDB_CODE_SUCCESS; +} + +int32_t schRemoveTaskFromExecList(SSchJob *pJob, SSchTask *pTask) { + int32_t code = taosHashRemove(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId)); + if (code) { + SCH_TASK_ELOG("task failed to rm from execTask list, code:%x", code); + SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); + } + + return TSDB_CODE_SUCCESS; +} + + +int32_t schPushTaskToExecList(SSchJob *pJob, SSchTask *pTask) { + int32_t code = taosHashPut(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId), &pTask, POINTER_BYTES); + if (0 != code) { + if (HASH_NODE_EXIST(code)) { + SCH_TASK_ELOG("task already in execTask list, code:%x", code); + SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); + } + + SCH_TASK_ELOG("taosHashPut task to execTask list failed, errno:%d", errno); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + SCH_TASK_DLOG("task added to execTask list, numOfTasks:%d", taosHashGetSize(pJob->execTasks)); + + return TSDB_CODE_SUCCESS; +} + +int32_t schMoveTaskToSuccList(SSchJob *pJob, SSchTask *pTask, bool *moved) { + if (0 != taosHashRemove(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId))) { + SCH_TASK_WLOG("remove task from execTask list failed, may not exist, status:%s", SCH_GET_TASK_STATUS_STR(pTask)); + } else { + SCH_TASK_DLOG("task removed from execTask list, numOfTasks:%d", taosHashGetSize(pJob->execTasks)); + } + + int32_t code = taosHashPut(pJob->succTasks, &pTask->taskId, sizeof(pTask->taskId), &pTask, POINTER_BYTES); + if (0 != code) { + if (HASH_NODE_EXIST(code)) { + *moved = true; + SCH_TASK_ELOG("task already in succTask list, status:%s", SCH_GET_TASK_STATUS_STR(pTask)); + SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); + } + + SCH_TASK_ELOG("taosHashPut task to succTask list failed, errno:%d", errno); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + *moved = true; + + SCH_TASK_DLOG("task moved to succTask list, numOfTasks:%d", taosHashGetSize(pJob->succTasks)); + + return TSDB_CODE_SUCCESS; +} + +int32_t schMoveTaskToFailList(SSchJob *pJob, SSchTask *pTask, bool *moved) { + *moved = false; + + if (0 != taosHashRemove(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId))) { + SCH_TASK_WLOG("remove task from execTask list failed, may not exist, status:%s", SCH_GET_TASK_STATUS_STR(pTask)); + } + + int32_t code = taosHashPut(pJob->failTasks, &pTask->taskId, sizeof(pTask->taskId), &pTask, POINTER_BYTES); + if (0 != code) { + if (HASH_NODE_EXIST(code)) { + *moved = true; + + SCH_TASK_WLOG("task already in failTask list, status:%s", SCH_GET_TASK_STATUS_STR(pTask)); + SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); + } + + SCH_TASK_ELOG("taosHashPut task to failTask list failed, errno:%d", errno); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + *moved = true; + + SCH_TASK_DLOG("task moved to failTask list, 
numOfTasks:%d", taosHashGetSize(pJob->failTasks)); + + return TSDB_CODE_SUCCESS; +} + +int32_t schMoveTaskToExecList(SSchJob *pJob, SSchTask *pTask, bool *moved) { + if (0 != taosHashRemove(pJob->succTasks, &pTask->taskId, sizeof(pTask->taskId))) { + SCH_TASK_WLOG("remove task from succTask list failed, may not exist, status:%s", SCH_GET_TASK_STATUS_STR(pTask)); + } + + int32_t code = taosHashPut(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId), &pTask, POINTER_BYTES); + if (0 != code) { + if (HASH_NODE_EXIST(code)) { + *moved = true; + + SCH_TASK_ELOG("task already in execTask list, status:%s", SCH_GET_TASK_STATUS_STR(pTask)); + SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); + } + + SCH_TASK_ELOG("taosHashPut task to execTask list failed, errno:%d", errno); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + *moved = true; + + SCH_TASK_DLOG("task moved to execTask list, numOfTasks:%d", taosHashGetSize(pJob->execTasks)); + + return TSDB_CODE_SUCCESS; +} + +int32_t schTaskCheckSetRetry(SSchJob *pJob, SSchTask *pTask, int32_t errCode, bool *needRetry) { + int8_t status = 0; + ++pTask->tryTimes; + + if (schJobNeedToStop(pJob, &status)) { + *needRetry = false; + SCH_TASK_DLOG("task no more retry cause of job status, job status:%s", jobTaskStatusStr(status)); + return TSDB_CODE_SUCCESS; + } + + if (pTask->tryTimes >= REQUEST_MAX_TRY_TIMES) { + *needRetry = false; + SCH_TASK_DLOG("task no more retry since reach max try times, tryTimes:%d", pTask->tryTimes); + return TSDB_CODE_SUCCESS; + } + + if (!NEED_SCHEDULER_RETRY_ERROR(errCode)) { + *needRetry = false; + SCH_TASK_DLOG("task no more retry cause of errCode, errCode:%x - %s", errCode, tstrerror(errCode)); + return TSDB_CODE_SUCCESS; + } + + // TODO CHECK epList/condidateList + if (SCH_IS_DATA_SRC_TASK(pTask)) { + if (pTask->tryTimes >= SCH_TASK_NUM_OF_EPS(&pTask->plan->execNode)) { + *needRetry = false; + SCH_TASK_DLOG("task no more retry since all ep tried, tryTimes:%d, epNum:%d", pTask->tryTimes, + SCH_TASK_NUM_OF_EPS(&pTask->plan->execNode)); + return TSDB_CODE_SUCCESS; + } + } else { + int32_t candidateNum = taosArrayGetSize(pTask->candidateAddrs); + + if ((pTask->candidateIdx + 1) >= candidateNum) { + *needRetry = false; + SCH_TASK_DLOG("task no more retry since all candiates tried, candidateIdx:%d, candidateNum:%d", + pTask->candidateIdx, candidateNum); + return TSDB_CODE_SUCCESS; + } + } + + *needRetry = true; + SCH_TASK_DLOG("task need the %dth retry, errCode:%x - %s", pTask->tryTimes, errCode, tstrerror(errCode)); + + return TSDB_CODE_SUCCESS; +} + +int32_t schHandleTaskRetry(SSchJob *pJob, SSchTask *pTask) { + atomic_sub_fetch_32(&pTask->level->taskLaunchedNum, 1); + + SCH_ERR_RET(schRemoveTaskFromExecList(pJob, pTask)); + SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_NOT_START); + + if (SCH_TASK_NEED_FLOW_CTRL(pJob, pTask)) { + SCH_ERR_RET(schDecTaskFlowQuota(pJob, pTask)); + SCH_ERR_RET(schLaunchTasksInFlowCtrlList(pJob, pTask)); + } + + if (SCH_IS_DATA_SRC_TASK(pTask)) { + SCH_SWITCH_EPSET(&pTask->plan->execNode); + } else { + ++pTask->candidateIdx; + } + + SCH_ERR_RET(schLaunchTask(pJob, pTask)); + + return TSDB_CODE_SUCCESS; +} + +void schUpdateJobErrCode(SSchJob *pJob, int32_t errCode) { + if (TSDB_CODE_SUCCESS == errCode) { + return; + } + + int32_t origCode = atomic_load_32(&pJob->errCode); + if (TSDB_CODE_SUCCESS == origCode) { + if (origCode == atomic_val_compare_exchange_32(&pJob->errCode, origCode, errCode)) { + goto _return; + } + + origCode = atomic_load_32(&pJob->errCode); + } + + if (NEED_CLIENT_HANDLE_ERROR(origCode)) { 
+ return; + } + + if (NEED_CLIENT_HANDLE_ERROR(errCode)) { + atomic_store_32(&pJob->errCode, errCode); + goto _return; + } + + return; + +_return: + + SCH_JOB_DLOG("job errCode updated to %x - %s", errCode, tstrerror(errCode)); +} + + +int32_t schSetJobQueryRes(SSchJob* pJob, SQueryResult* pRes) { + pRes->code = atomic_load_32(&pJob->errCode); + pRes->numOfRows = pJob->resNumOfRows; + pRes->res = pJob->queryRes; + pJob->queryRes = NULL; + + return TSDB_CODE_SUCCESS; +} + +int32_t schSetJobFetchRes(SSchJob* pJob, void** pData) { + int32_t code = 0; + if (pJob->resData && ((SRetrieveTableRsp *)pJob->resData)->completed) { + SCH_ERR_RET(schChkUpdateJobStatus(pJob, JOB_TASK_STATUS_SUCCEED)); + } + + while (true) { + *pData = atomic_load_ptr(&pJob->resData); + if (*pData != atomic_val_compare_exchange_ptr(&pJob->resData, *pData, NULL)) { + continue; + } + + break; + } + + if (NULL == *pData) { + SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)taosMemoryCalloc(1, sizeof(SRetrieveTableRsp)); + if (rsp) { + rsp->completed = 1; + } + + *pData = rsp; + SCH_JOB_DLOG("empty res and set query complete, code:%x", code); + } + + SCH_JOB_DLOG("fetch done, totalRows:%d", pJob->resNumOfRows); + + return TSDB_CODE_SUCCESS; +} + +int32_t schNotifyUserQueryRes(SSchJob* pJob) { + pJob->userRes.queryRes = taosMemoryCalloc(1, sizeof(*pJob->userRes.queryRes)); + if (pJob->userRes.queryRes) { + schSetJobQueryRes(pJob, pJob->userRes.queryRes); + } + + (*pJob->userRes.execFp)(pJob->userRes.queryRes, pJob->userRes.userParam, atomic_load_32(&pJob->errCode)); + + pJob->userRes.queryRes = NULL; + + return TSDB_CODE_SUCCESS; +} + +int32_t schNotifyUserFetchRes(SSchJob* pJob) { + void* pRes = NULL; + + SCH_ERR_RET(schSetJobFetchRes(pJob, &pRes)); + + (*pJob->userRes.fetchFp)(pRes, pJob->userRes.userParam, atomic_load_32(&pJob->errCode)); + + return TSDB_CODE_SUCCESS; +} + +int32_t schProcessOnJobFailureImpl(SSchJob *pJob, int32_t status, int32_t errCode) { + // if already FAILED, no more processing + SCH_ERR_RET(schChkUpdateJobStatus(pJob, status)); + + schUpdateJobErrCode(pJob, errCode); + + if (atomic_load_8(&pJob->userFetch) || pJob->attr.syncSchedule) { + tsem_post(&pJob->rspSem); + } + + int32_t code = atomic_load_32(&pJob->errCode); + + SCH_JOB_DLOG("job failed with error: %s", tstrerror(code)); + + if (!pJob->attr.syncSchedule) { + if (SCH_EXEC_CB == atomic_val_compare_exchange_32(&pJob->userCb, SCH_EXEC_CB, 0)) { + schNotifyUserQueryRes(pJob); + } else if (SCH_FETCH_CB == atomic_val_compare_exchange_32(&pJob->userCb, SCH_FETCH_CB, 0)) { + schNotifyUserFetchRes(pJob); + } + } + + SCH_RET(code); +} + +// Note: no more task error processing, handled in function internal +int32_t schProcessOnJobFailure(SSchJob *pJob, int32_t errCode) { + SCH_RET(schProcessOnJobFailureImpl(pJob, JOB_TASK_STATUS_FAILED, errCode)); +} + +// Note: no more error processing, handled in function internal +int32_t schProcessOnJobDropped(SSchJob *pJob, int32_t errCode) { + SCH_RET(schProcessOnJobFailureImpl(pJob, JOB_TASK_STATUS_DROPPING, errCode)); +} + +// Note: no more task error processing, handled in function internal +int32_t schProcessOnJobPartialSuccess(SSchJob *pJob) { + int32_t code = 0; + + SCH_ERR_RET(schChkUpdateJobStatus(pJob, JOB_TASK_STATUS_PARTIAL_SUCCEED)); + + if (pJob->attr.syncSchedule) { + tsem_post(&pJob->rspSem); + } else if (SCH_EXEC_CB == atomic_val_compare_exchange_32(&pJob->userCb, SCH_EXEC_CB, 0)) { + schNotifyUserQueryRes(pJob); + } else if (SCH_FETCH_CB == atomic_val_compare_exchange_32(&pJob->userCb, SCH_FETCH_CB, 
0)) { + schNotifyUserFetchRes(pJob); + } + + if (atomic_load_8(&pJob->userFetch)) { + SCH_ERR_JRET(schFetchFromRemote(pJob)); + } + + return TSDB_CODE_SUCCESS; + +_return: + + SCH_RET(schProcessOnJobFailure(pJob, code)); +} + +void schProcessOnDataFetched(SSchJob *job) { + atomic_val_compare_exchange_32(&job->remoteFetch, 1, 0); + + if (job->attr.syncSchedule) { + tsem_post(&job->rspSem); + } else if (SCH_FETCH_CB == atomic_val_compare_exchange_32(&job->userCb, SCH_FETCH_CB, 0)) { + schNotifyUserFetchRes(job); + } +} + +// Note: no more task error processing, handled in function internal +int32_t schProcessOnTaskFailure(SSchJob *pJob, SSchTask *pTask, int32_t errCode) { + int8_t status = 0; + + if (schJobNeedToStop(pJob, &status)) { + SCH_TASK_DLOG("task failed not processed cause of job status, job status:%s", jobTaskStatusStr(status)); + SCH_RET(atomic_load_32(&pJob->errCode)); + } + + bool needRetry = false; + bool moved = false; + int32_t taskDone = 0; + int32_t code = 0; + + SCH_TASK_DLOG("taskOnFailure, code:%s", tstrerror(errCode)); + + SCH_ERR_JRET(schTaskCheckSetRetry(pJob, pTask, errCode, &needRetry)); + + if (!needRetry) { + SCH_TASK_ELOG("task failed and no more retry, code:%s", tstrerror(errCode)); + + if (SCH_GET_TASK_STATUS(pTask) == JOB_TASK_STATUS_EXECUTING) { + SCH_ERR_JRET(schMoveTaskToFailList(pJob, pTask, &moved)); + } else { + SCH_TASK_ELOG("task not in executing list, status:%s", SCH_GET_TASK_STATUS_STR(pTask)); + SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR); + } + + SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_FAILED); + + if (SCH_IS_WAIT_ALL_JOB(pJob)) { + SCH_LOCK(SCH_WRITE, &pTask->level->lock); + pTask->level->taskFailed++; + taskDone = pTask->level->taskSucceed + pTask->level->taskFailed; + SCH_UNLOCK(SCH_WRITE, &pTask->level->lock); + + schUpdateJobErrCode(pJob, errCode); + + if (taskDone < pTask->level->taskNum) { + SCH_TASK_DLOG("need to wait other tasks, doneNum:%d, allNum:%d", taskDone, pTask->level->taskNum); + SCH_RET(errCode); + } + } + } else { + SCH_ERR_JRET(schHandleTaskRetry(pJob, pTask)); + + return TSDB_CODE_SUCCESS; + } + +_return: + + SCH_RET(schProcessOnJobFailure(pJob, errCode)); +} + +int32_t schLaunchNextLevelTasks(SSchJob *pJob, SSchTask *pTask) { + if (!SCH_IS_QUERY_JOB(pJob)) { + return TSDB_CODE_SUCCESS; + } + + SSchLevel *pLevel = pTask->level; + int32_t doneNum = atomic_add_fetch_32(&pLevel->taskDoneNum, 1); + if (doneNum == pLevel->taskNum) { + pJob->levelIdx--; + + pLevel = taosArrayGet(pJob->levels, pJob->levelIdx); + for (int32_t i = 0; i < pLevel->taskNum; ++i) { + SSchTask *pTask = taosArrayGet(pLevel->subTasks, i); + + if (pTask->children && taosArrayGetSize(pTask->children) > 0) { + continue; + } + + SCH_ERR_RET(schLaunchTask(pJob, pTask)); + } + } + + return TSDB_CODE_SUCCESS; +} + + +// Note: no more task error processing, handled in function internal +int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask) { + bool moved = false; + int32_t code = 0; + + SCH_TASK_DLOG("taskOnSuccess, status:%s", SCH_GET_TASK_STATUS_STR(pTask)); + + SCH_ERR_JRET(schMoveTaskToSuccList(pJob, pTask, &moved)); + + SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_PARTIAL_SUCCEED); + + SCH_ERR_JRET(schRecordTaskSucceedNode(pJob, pTask)); + + SCH_ERR_JRET(schLaunchTasksInFlowCtrlList(pJob, pTask)); + + int32_t parentNum = pTask->parents ? 
(int32_t)taosArrayGetSize(pTask->parents) : 0; + if (parentNum == 0) { + int32_t taskDone = 0; + if (SCH_IS_WAIT_ALL_JOB(pJob)) { + SCH_LOCK(SCH_WRITE, &pTask->level->lock); + pTask->level->taskSucceed++; + taskDone = pTask->level->taskSucceed + pTask->level->taskFailed; + SCH_UNLOCK(SCH_WRITE, &pTask->level->lock); + + if (taskDone < pTask->level->taskNum) { + SCH_TASK_DLOG("wait all tasks, done:%d, all:%d", taskDone, pTask->level->taskNum); + return TSDB_CODE_SUCCESS; + } else if (taskDone > pTask->level->taskNum) { + SCH_TASK_ELOG("taskDone number invalid, done:%d, total:%d", taskDone, pTask->level->taskNum); + } + + if (pTask->level->taskFailed > 0) { + SCH_RET(schProcessOnJobFailure(pJob, 0)); + } else { + SCH_RET(schProcessOnJobPartialSuccess(pJob)); + } + } else { + pJob->resNode = pTask->succeedAddr; + } + + pJob->fetchTask = pTask; + + SCH_ERR_JRET(schMoveTaskToExecList(pJob, pTask, &moved)); + + SCH_RET(schProcessOnJobPartialSuccess(pJob)); + } + + /* + if (SCH_IS_DATA_SRC_TASK(task) && job->dataSrcEps.numOfEps < SCH_MAX_CANDIDATE_EP_NUM) { + strncpy(job->dataSrcEps.fqdn[job->dataSrcEps.numOfEps], task->execAddr.fqdn, sizeof(task->execAddr.fqdn)); + job->dataSrcEps.port[job->dataSrcEps.numOfEps] = task->execAddr.port; + + ++job->dataSrcEps.numOfEps; + } + */ + + for (int32_t i = 0; i < parentNum; ++i) { + SSchTask *par = *(SSchTask **)taosArrayGet(pTask->parents, i); + int32_t readyNum = atomic_add_fetch_32(&par->childReady, 1); + + SCH_LOCK(SCH_WRITE, &par->lock); + SDownstreamSourceNode source = {.type = QUERY_NODE_DOWNSTREAM_SOURCE, + .taskId = pTask->taskId, + .schedId = schMgmt.sId, + .addr = pTask->succeedAddr}; + qSetSubplanExecutionNode(par->plan, pTask->plan->id.groupId, &source); + SCH_UNLOCK(SCH_WRITE, &par->lock); + + if (SCH_TASK_READY_FOR_LAUNCH(readyNum, par)) { + SCH_ERR_RET(schLaunchTask(pJob, par)); + } + } + + SCH_ERR_RET(schLaunchNextLevelTasks(pJob, pTask)); + + return TSDB_CODE_SUCCESS; + +_return: + + SCH_RET(schProcessOnJobFailure(pJob, code)); +} + +// Note: no more error processing, handled in function internal +int32_t schFetchFromRemote(SSchJob *pJob) { + int32_t code = 0; + + if (atomic_val_compare_exchange_32(&pJob->remoteFetch, 0, 1) != 0) { + SCH_JOB_ELOG("prior fetching not finished, remoteFetch:%d", atomic_load_32(&pJob->remoteFetch)); + return TSDB_CODE_SUCCESS; + } + + void *resData = atomic_load_ptr(&pJob->resData); + if (resData) { + atomic_val_compare_exchange_32(&pJob->remoteFetch, 1, 0); + + SCH_JOB_DLOG("res already fetched, res:%p", resData); + return TSDB_CODE_SUCCESS; + } + + SCH_ERR_JRET(schBuildAndSendMsg(pJob, pJob->fetchTask, &pJob->resNode, TDMT_VND_FETCH)); + + return TSDB_CODE_SUCCESS; + +_return: + + atomic_val_compare_exchange_32(&pJob->remoteFetch, 1, 0); + + SCH_RET(schProcessOnTaskFailure(pJob, pJob->fetchTask, code)); +} + +int32_t schProcessOnExplainDone(SSchJob *pJob, SSchTask *pTask, SRetrieveTableRsp *pRsp) { + SCH_TASK_DLOG("got explain rsp, rows:%d, complete:%d", htonl(pRsp->numOfRows), pRsp->completed); + + atomic_store_32(&pJob->resNumOfRows, htonl(pRsp->numOfRows)); + atomic_store_ptr(&pJob->resData, pRsp); + + SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_SUCCEED); + + schProcessOnDataFetched(pJob); + + return TSDB_CODE_SUCCESS; +} + +int32_t schSaveJobQueryRes(SSchJob *pJob, SQueryTableRsp *rsp) { + if (rsp->tbFName[0]) { + if (NULL == pJob->queryRes) { + pJob->queryRes = taosArrayInit(pJob->taskNum, sizeof(STbVerInfo)); + if (NULL == pJob->queryRes) { + SCH_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + } + + STbVerInfo 
tbInfo; + strcpy(tbInfo.tbFName, rsp->tbFName); + tbInfo.sversion = rsp->sversion; + tbInfo.tversion = rsp->tversion; + + taosArrayPush((SArray *)pJob->queryRes, &tbInfo); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t schGetTaskFromTaskList(SHashObj *pTaskList, uint64_t taskId, SSchTask **pTask) { + int32_t s = taosHashGetSize(pTaskList); + if (s <= 0) { + return TSDB_CODE_SUCCESS; + } + + SSchTask **task = taosHashGet(pTaskList, &taskId, sizeof(taskId)); + if (NULL == task || NULL == (*task)) { + return TSDB_CODE_SUCCESS; + } + + *pTask = *task; + + return TSDB_CODE_SUCCESS; +} + +int32_t schUpdateTaskExecNodeHandle(SSchTask *pTask, void *handle, int32_t rspCode) { + if (rspCode || NULL == pTask->execNodes || taosArrayGetSize(pTask->execNodes) > 1 || + taosArrayGetSize(pTask->execNodes) <= 0) { + return TSDB_CODE_SUCCESS; + } + + SSchNodeInfo *nodeInfo = taosArrayGet(pTask->execNodes, 0); + nodeInfo->handle = handle; + + return TSDB_CODE_SUCCESS; +} + +int32_t schLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) { + int8_t status = 0; + int32_t code = 0; + + atomic_add_fetch_32(&pTask->level->taskLaunchedNum, 1); + + if (schJobNeedToStop(pJob, &status)) { + SCH_TASK_DLOG("no need to launch task cause of job status, job status:%s", jobTaskStatusStr(status)); + + SCH_RET(atomic_load_32(&pJob->errCode)); + } + + // NOTE: race condition: the task should be put into the hash table before send msg to server + if (SCH_GET_TASK_STATUS(pTask) != JOB_TASK_STATUS_EXECUTING) { + SCH_ERR_RET(schPushTaskToExecList(pJob, pTask)); + SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_EXECUTING); + } + + SSubplan *plan = pTask->plan; + + if (NULL == pTask->msg) { // TODO add more detailed reason for failure + code = qSubPlanToString(plan, &pTask->msg, &pTask->msgLen); + if (TSDB_CODE_SUCCESS != code) { + SCH_TASK_ELOG("failed to create physical plan, code:%s, msg:%p, len:%d", tstrerror(code), pTask->msg, + pTask->msgLen); + SCH_ERR_RET(code); + } else { + SCH_TASK_DLOGL("physical plan len:%d, %s", pTask->msgLen, pTask->msg); + } + } + + SCH_ERR_RET(schSetTaskCandidateAddrs(pJob, pTask)); + + if (SCH_IS_QUERY_JOB(pJob)) { + SCH_ERR_RET(schEnsureHbConnection(pJob, pTask)); + } + + SCH_ERR_RET(schBuildAndSendMsg(pJob, pTask, NULL, plan->msgType)); + + return TSDB_CODE_SUCCESS; +} + +// Note: no more error processing, handled in function internal +int32_t schLaunchTask(SSchJob *pJob, SSchTask *pTask) { + bool enough = false; + int32_t code = 0; + + SCH_SET_TASK_HANDLE(pTask, NULL); + + if (SCH_TASK_NEED_FLOW_CTRL(pJob, pTask)) { + SCH_ERR_JRET(schCheckIncTaskFlowQuota(pJob, pTask, &enough)); + + if (enough) { + SCH_ERR_JRET(schLaunchTaskImpl(pJob, pTask)); + } + } else { + SCH_ERR_JRET(schLaunchTaskImpl(pJob, pTask)); + } + + return TSDB_CODE_SUCCESS; + +_return: + + SCH_RET(schProcessOnTaskFailure(pJob, pTask, code)); +} + +int32_t schLaunchLevelTasks(SSchJob *pJob, SSchLevel *level) { + for (int32_t i = 0; i < level->taskNum; ++i) { + SSchTask *pTask = taosArrayGet(level->subTasks, i); + + SCH_ERR_RET(schLaunchTask(pJob, pTask)); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t schLaunchJob(SSchJob *pJob) { + SSchLevel *level = taosArrayGet(pJob->levels, pJob->levelIdx); + + SCH_ERR_RET(schChkUpdateJobStatus(pJob, JOB_TASK_STATUS_EXECUTING)); + + SCH_ERR_RET(schChkJobNeedFlowCtrl(pJob, level)); + + SCH_ERR_RET(schLaunchLevelTasks(pJob, level)); + + return TSDB_CODE_SUCCESS; +} + +void schDropTaskOnExecNode(SSchJob *pJob, SSchTask *pTask) { + if (NULL == pTask->execNodes) { + SCH_TASK_DLOG("no exec address, status:%s", 
SCH_GET_TASK_STATUS_STR(pTask)); + return; + } + + int32_t size = (int32_t)taosArrayGetSize(pTask->execNodes); + + if (size <= 0) { + SCH_TASK_DLOG("task has no execNodes, no need to drop it, status:%s", SCH_GET_TASK_STATUS_STR(pTask)); + return; + } + + SSchNodeInfo *nodeInfo = NULL; + for (int32_t i = 0; i < size; ++i) { + nodeInfo = (SSchNodeInfo *)taosArrayGet(pTask->execNodes, i); + SCH_SET_TASK_HANDLE(pTask, nodeInfo->handle); + + schBuildAndSendMsg(pJob, pTask, &nodeInfo->addr, TDMT_VND_DROP_TASK); + } + + SCH_TASK_DLOG("task has %d exec address", size); +} + +void schDropTaskInHashList(SSchJob *pJob, SHashObj *list) { + if (!SCH_IS_NEED_DROP_JOB(pJob)) { + return; + } + + void *pIter = taosHashIterate(list, NULL); + while (pIter) { + SSchTask *pTask = *(SSchTask **)pIter; + + schDropTaskOnExecNode(pJob, pTask); + + pIter = taosHashIterate(list, pIter); + } +} + +void schDropJobAllTasks(SSchJob *pJob) { + schDropTaskInHashList(pJob, pJob->execTasks); + schDropTaskInHashList(pJob, pJob->succTasks); + schDropTaskInHashList(pJob, pJob->failTasks); +} + +int32_t schCancelJob(SSchJob *pJob) { + // TODO + return TSDB_CODE_SUCCESS; + // TODO MOVE ALL TASKS FROM EXEC LIST TO FAIL LIST +} + +void schFreeJobImpl(void *job) { + if (NULL == job) { + return; + } + + SSchJob *pJob = job; + uint64_t queryId = pJob->queryId; + int64_t refId = pJob->refId; + + if (pJob->status == JOB_TASK_STATUS_EXECUTING) { + schCancelJob(pJob); + } + + schDropJobAllTasks(pJob); + + pJob->subPlans = NULL; // it is a reference to pDag->pSubplans + + int32_t numOfLevels = taosArrayGetSize(pJob->levels); + for (int32_t i = 0; i < numOfLevels; ++i) { + SSchLevel *pLevel = taosArrayGet(pJob->levels, i); + + int32_t numOfTasks = taosArrayGetSize(pLevel->subTasks); + for (int32_t j = 0; j < numOfTasks; ++j) { + SSchTask *pTask = taosArrayGet(pLevel->subTasks, j); + schFreeTask(pTask); + } + + taosArrayDestroy(pLevel->subTasks); + } + + schFreeFlowCtrl(pJob); + + taosHashCleanup(pJob->execTasks); + taosHashCleanup(pJob->failTasks); + taosHashCleanup(pJob->succTasks); + + taosArrayDestroy(pJob->levels); + taosArrayDestroy(pJob->nodeList); + taosArrayDestroy(pJob->dataSrcTasks); + + qExplainFreeCtx(pJob->explainCtx); + + if (SCH_IS_QUERY_JOB(pJob)) { + taosArrayDestroy((SArray *)pJob->queryRes); + } else { + tFreeSSubmitRsp((SSubmitRsp*)pJob->queryRes); + } + + taosMemoryFreeClear(pJob->userRes.queryRes); + taosMemoryFreeClear(pJob->resData); + taosMemoryFreeClear(pJob); + + qDebug("QID:0x%" PRIx64 " job freed, refId:%" PRIx64 ", pointer:%p", queryId, refId, pJob); + + atomic_sub_fetch_32(&schMgmt.jobNum, 1); + + schCloseJobRef(); +} + +int32_t schExecJobImpl(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *job, const char *sql, + SSchResInfo *pRes, int64_t startTs, bool sync) { + qDebug("QID:0x%" PRIx64 " job started", pDag->queryId); + + if (pNodeList == NULL || taosArrayGetSize(pNodeList) <= 0) { + qDebug("QID:0x%" PRIx64 " input exec nodeList is empty", pDag->queryId); + } + + int32_t code = 0; + SSchJob *pJob = NULL; + SCH_ERR_RET(schInitJob(&pJob, pDag, pTrans, pNodeList, sql, pRes, startTs, sync)); + + *job = pJob->refId; + + SCH_ERR_JRET(schLaunchJob(pJob)); + + if (sync) { + SCH_JOB_DLOG("will wait for rsp now, job status:%s", SCH_GET_JOB_STATUS_STR(pJob)); + tsem_wait(&pJob->rspSem); + } else { + pJob->userCb = SCH_EXEC_CB; + } + + SCH_JOB_DLOG("job exec done, job status:%s", SCH_GET_JOB_STATUS_STR(pJob)); + +_return: + + schReleaseJob(pJob->refId); + + SCH_RET(code); +} + +int32_t schExecJob(void 
*pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql, + int64_t startTs, SSchResInfo *pRes) { + int32_t code = 0; + + *pJob = 0; + + if (EXPLAIN_MODE_STATIC == pDag->explainInfo.mode) { + SCH_ERR_JRET(schExecStaticExplainJob(pTrans, pNodeList, pDag, pJob, sql, NULL, true)); + } else { + SCH_ERR_JRET(schExecJobImpl(pTrans, pNodeList, pDag, pJob, sql, NULL, startTs, true)); + } + +_return: + + if (*pJob) { + SSchJob *job = schAcquireJob(*pJob); + schSetJobQueryRes(job, pRes->queryRes); + schReleaseJob(*pJob); + } + + return code; +} + +int32_t schAsyncExecJob(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql, + int64_t startTs, SSchResInfo *pRes) { + int32_t code = 0; + + *pJob = 0; + + if (EXPLAIN_MODE_STATIC == pDag->explainInfo.mode) { + SCH_ERR_RET(schExecStaticExplainJob(pTrans, pNodeList, pDag, pJob, sql, pRes, false)); + } else { + SCH_ERR_RET(schExecJobImpl(pTrans, pNodeList, pDag, pJob, sql, pRes, startTs, false)); + } + + return code; +} + +int32_t schExecStaticExplainJob(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *job, const char *sql, + SSchResInfo *pRes, bool sync) { + qDebug("QID:0x%" PRIx64 " job started", pDag->queryId); + + int32_t code = 0; + SSchJob *pJob = taosMemoryCalloc(1, sizeof(SSchJob)); + if (NULL == pJob) { + qError("QID:%" PRIx64 " calloc %d failed", pDag->queryId, (int32_t)sizeof(SSchJob)); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + pJob->sql = sql; + pJob->attr.queryJob = true; + pJob->attr.syncSchedule = sync; + pJob->attr.explainMode = pDag->explainInfo.mode; + pJob->queryId = pDag->queryId; + pJob->subPlans = pDag->pSubplans; + if (pRes) { + pJob->userRes = *pRes; + } + + SCH_ERR_JRET(qExecStaticExplain(pDag, (SRetrieveTableRsp **)&pJob->resData)); + + int64_t refId = taosAddRef(schMgmt.jobRef, pJob); + if (refId < 0) { + SCH_JOB_ELOG("taosAddRef job failed, error:%s", tstrerror(terrno)); + SCH_ERR_JRET(terrno); + } + + if (NULL == schAcquireJob(refId)) { + SCH_JOB_ELOG("schAcquireJob job failed, refId:%" PRIx64, refId); + SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); + } + + pJob->refId = refId; + + SCH_JOB_DLOG("job refId:%" PRIx64, pJob->refId); + + pJob->status = JOB_TASK_STATUS_PARTIAL_SUCCEED; + + *job = pJob->refId; + SCH_JOB_DLOG("job exec done, job status:%s", SCH_GET_JOB_STATUS_STR(pJob)); + + if (!pJob->attr.syncSchedule) { + code = schNotifyUserQueryRes(pJob); + } + + schReleaseJob(pJob->refId); + + SCH_RET(code); + +_return: + + schFreeJobImpl(pJob); + SCH_RET(code); +} + +int32_t schFetchRows(SSchJob *pJob) { + int32_t code = 0; + + int8_t status = SCH_GET_JOB_STATUS(pJob); + if (status == JOB_TASK_STATUS_DROPPING) { + SCH_JOB_ELOG("job is dropping, status:%s", jobTaskStatusStr(status)); + SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); + } + + if (!SCH_JOB_NEED_FETCH(pJob)) { + SCH_JOB_ELOG("no need to fetch data, status:%s", SCH_GET_JOB_STATUS_STR(pJob)); + SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR); + } + + if (atomic_val_compare_exchange_8(&pJob->userFetch, 0, 1) != 0) { + SCH_JOB_ELOG("prior fetching not finished, userFetch:%d", atomic_load_8(&pJob->userFetch)); + SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR); + } + + if (JOB_TASK_STATUS_FAILED == status || JOB_TASK_STATUS_DROPPING == status) { + SCH_JOB_ELOG("job failed or dropping, status:%s", jobTaskStatusStr(status)); + SCH_ERR_JRET(atomic_load_32(&pJob->errCode)); + } else if (status == JOB_TASK_STATUS_SUCCEED) { + SCH_JOB_DLOG("job already succeed, status:%s", jobTaskStatusStr(status)); + goto _return; + } else if (status != 
JOB_TASK_STATUS_PARTIAL_SUCCEED) { + SCH_JOB_ELOG("job status error for fetch, status:%s", jobTaskStatusStr(status)); + SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR); + } + + if (!(pJob->attr.explainMode == EXPLAIN_MODE_STATIC)) { + SCH_ERR_JRET(schFetchFromRemote(pJob)); + tsem_wait(&pJob->rspSem); + + status = SCH_GET_JOB_STATUS(pJob); + if (JOB_TASK_STATUS_FAILED == status || JOB_TASK_STATUS_DROPPING == status) { + SCH_JOB_ELOG("job failed or dropping, status:%s", jobTaskStatusStr(status)); + SCH_ERR_JRET(atomic_load_32(&pJob->errCode)); + } + } + + SCH_ERR_JRET(schSetJobFetchRes(pJob, pJob->userRes.fetchRes)); + +_return: + + atomic_val_compare_exchange_8(&pJob->userFetch, 1, 0); + + SCH_RET(code); +} + +int32_t schAsyncFetchRows(SSchJob *pJob) { + int32_t code = 0; + + int8_t status = SCH_GET_JOB_STATUS(pJob); + if (status == JOB_TASK_STATUS_DROPPING) { + SCH_JOB_ELOG("job is dropping, status:%s", jobTaskStatusStr(status)); + SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); + } + + if (!SCH_JOB_NEED_FETCH(pJob)) { + SCH_JOB_ELOG("no need to fetch data, status:%s", SCH_GET_JOB_STATUS_STR(pJob)); + SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR); + } + + if (atomic_val_compare_exchange_8(&pJob->userFetch, 0, 1) != 0) { + SCH_JOB_ELOG("prior fetching not finished, userFetch:%d", atomic_load_8(&pJob->userFetch)); + SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR); + } + + if (JOB_TASK_STATUS_FAILED == status || JOB_TASK_STATUS_DROPPING == status) { + SCH_JOB_ELOG("job failed or dropping, status:%s", jobTaskStatusStr(status)); + SCH_ERR_JRET(atomic_load_32(&pJob->errCode)); + } else if (status == JOB_TASK_STATUS_SUCCEED) { + SCH_JOB_DLOG("job already succeed, status:%s", jobTaskStatusStr(status)); + goto _return; + } else if (status != JOB_TASK_STATUS_PARTIAL_SUCCEED) { + SCH_JOB_ELOG("job status error for fetch, status:%s", jobTaskStatusStr(status)); + SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR); + } + + if (pJob->attr.explainMode == EXPLAIN_MODE_STATIC) { + SCH_ERR_JRET(schNotifyUserFetchRes(pJob)); + + atomic_val_compare_exchange_8(&pJob->userFetch, 1, 0); + } else { + pJob->userCb = SCH_FETCH_CB; + + SCH_ERR_JRET(schFetchFromRemote(pJob)); + } + + return TSDB_CODE_SUCCESS; + +_return: + + atomic_val_compare_exchange_8(&pJob->userFetch, 1, 0); + + SCH_RET(code); +} + + diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c new file mode 100644 index 0000000000000000000000000000000000000000..312d587b6f0ee29a9f2da22afc23a2834747b063 --- /dev/null +++ b/source/libs/scheduler/src/schRemote.c @@ -0,0 +1,1188 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "catalog.h" +#include "command.h" +#include "query.h" +#include "schedulerInt.h" +#include "tmsg.h" +#include "tref.h" +#include "trpc.h" + + +int32_t schValidateReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t msgType) { + int32_t lastMsgType = SCH_GET_TASK_LASTMSG_TYPE(pTask); + int32_t taskStatus = SCH_GET_TASK_STATUS(pTask); + int32_t reqMsgType = msgType - 1; + switch (msgType) { + case TDMT_SCH_LINK_BROKEN: + case TDMT_VND_EXPLAIN_RSP: + return TSDB_CODE_SUCCESS; + case TDMT_VND_QUERY_RSP: // query_rsp may be processed later than ready_rsp + if (lastMsgType != reqMsgType && -1 != lastMsgType) { + SCH_TASK_DLOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", TMSG_INFO(lastMsgType), + TMSG_INFO(msgType)); + } + + if (taskStatus != JOB_TASK_STATUS_EXECUTING && taskStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) { + SCH_TASK_DLOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus), + TMSG_INFO(msgType)); + } + + SCH_SET_TASK_LASTMSG_TYPE(pTask, -1); + return TSDB_CODE_SUCCESS; + case TDMT_VND_FETCH_RSP: + if (lastMsgType != reqMsgType && -1 != lastMsgType) { + SCH_TASK_ELOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", TMSG_INFO(lastMsgType), + TMSG_INFO(msgType)); + SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); + } + + if (taskStatus != JOB_TASK_STATUS_EXECUTING && taskStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) { + SCH_TASK_ELOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus), + TMSG_INFO(msgType)); + SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); + } + + SCH_SET_TASK_LASTMSG_TYPE(pTask, -1); + return TSDB_CODE_SUCCESS; + case TDMT_VND_CREATE_TABLE_RSP: + case TDMT_VND_DROP_TABLE_RSP: + case TDMT_VND_ALTER_TABLE_RSP: + case TDMT_VND_SUBMIT_RSP: + break; + default: + SCH_TASK_ELOG("unknown rsp msg, type:%s, status:%s", TMSG_INFO(msgType), jobTaskStatusStr(taskStatus)); + SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + } + + if (lastMsgType != reqMsgType) { + SCH_TASK_ELOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", TMSG_INFO(lastMsgType), + TMSG_INFO(msgType)); + SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); + } + + if (taskStatus != JOB_TASK_STATUS_EXECUTING && taskStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) { + SCH_TASK_ELOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus), + TMSG_INFO(msgType)); + SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); + } + + SCH_SET_TASK_LASTMSG_TYPE(pTask, -1); + + return TSDB_CODE_SUCCESS; +} + +// Note: no more task error processing, handled in function internal +int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, char *msg, int32_t msgSize, + int32_t rspCode) { + int32_t code = 0; + int8_t status = 0; + + if (schJobNeedToStop(pJob, &status)) { + SCH_TASK_ELOG("rsp not processed cause of job status, job status:%s, rspCode:0x%x", jobTaskStatusStr(status), + rspCode); + taosMemoryFreeClear(msg); + SCH_RET(atomic_load_32(&pJob->errCode)); + } + + SCH_ERR_JRET(schValidateReceivedMsgType(pJob, pTask, msgType)); + + switch (msgType) { + case TDMT_VND_CREATE_TABLE_RSP: { + SVCreateTbBatchRsp batchRsp = {0}; + if (msg) { + SDecoder coder = {0}; + tDecoderInit(&coder, msg, msgSize); + code = tDecodeSVCreateTbBatchRsp(&coder, &batchRsp); + if (TSDB_CODE_SUCCESS == code && batchRsp.nRsps > 0) { + for (int32_t i = 0; i < batchRsp.nRsps; ++i) { + SVCreateTbRsp *rsp = batchRsp.pRsps + i; + if (TSDB_CODE_SUCCESS != rsp->code) { + code = rsp->code; + tDecoderClear(&coder); + 
SCH_ERR_JRET(code); + } + } + } + tDecoderClear(&coder); + SCH_ERR_JRET(code); + } + + SCH_ERR_JRET(rspCode); + taosMemoryFreeClear(msg); + + SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); + break; + } + case TDMT_VND_DROP_TABLE_RSP: { + SVDropTbBatchRsp batchRsp = {0}; + if (msg) { + SDecoder coder = {0}; + tDecoderInit(&coder, msg, msgSize); + code = tDecodeSVDropTbBatchRsp(&coder, &batchRsp); + if (TSDB_CODE_SUCCESS == code && batchRsp.nRsps > 0) { + for (int32_t i = 0; i < batchRsp.nRsps; ++i) { + SVDropTbRsp *rsp = batchRsp.pRsps + i; + if (TSDB_CODE_SUCCESS != rsp->code) { + code = rsp->code; + tDecoderClear(&coder); + SCH_ERR_JRET(code); + } + } + } + tDecoderClear(&coder); + SCH_ERR_JRET(code); + } + + SCH_ERR_JRET(rspCode); + taosMemoryFreeClear(msg); + + SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); + break; + } + case TDMT_VND_ALTER_TABLE_RSP: { + SVAlterTbRsp rsp = {0}; + if (msg) { + SDecoder coder = {0}; + tDecoderInit(&coder, msg, msgSize); + code = tDecodeSVAlterTbRsp(&coder, &rsp); + tDecoderClear(&coder); + SCH_ERR_JRET(code); + SCH_ERR_JRET(rsp.code); + } + + SCH_ERR_JRET(rspCode); + + if (NULL == msg) { + SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); + } + + taosMemoryFreeClear(msg); + + SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); + break; + } + case TDMT_VND_SUBMIT_RSP: { + SCH_ERR_JRET(rspCode); + + if (msg) { + SDecoder coder = {0}; + SSubmitRsp *rsp = taosMemoryMalloc(sizeof(*rsp)); + tDecoderInit(&coder, msg, msgSize); + code = tDecodeSSubmitRsp(&coder, rsp); + if (code) { + SCH_TASK_ELOG("decode submitRsp failed, code:%d", code); + tFreeSSubmitRsp(rsp); + SCH_ERR_JRET(code); + } + + if (rsp->nBlocks > 0) { + for (int32_t i = 0; i < rsp->nBlocks; ++i) { + SSubmitBlkRsp *blk = rsp->pBlocks + i; + if (TSDB_CODE_SUCCESS != blk->code) { + code = blk->code; + tFreeSSubmitRsp(rsp); + SCH_ERR_JRET(code); + } + } + } + + atomic_add_fetch_32(&pJob->resNumOfRows, rsp->affectedRows); + SCH_TASK_DLOG("submit succeed, affectedRows:%d", rsp->affectedRows); + + SCH_LOCK(SCH_WRITE, &pJob->resLock); + if (pJob->queryRes) { + SSubmitRsp *sum = pJob->queryRes; + sum->affectedRows += rsp->affectedRows; + sum->nBlocks += rsp->nBlocks; + sum->pBlocks = taosMemoryRealloc(sum->pBlocks, sum->nBlocks * sizeof(*sum->pBlocks)); + memcpy(sum->pBlocks + sum->nBlocks - rsp->nBlocks, rsp->pBlocks, rsp->nBlocks * sizeof(*sum->pBlocks)); + taosMemoryFree(rsp->pBlocks); + taosMemoryFree(rsp); + } else { + pJob->queryRes = rsp; + } + SCH_UNLOCK(SCH_WRITE, &pJob->resLock); + } + + taosMemoryFreeClear(msg); + + SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); + + break; + } + case TDMT_VND_QUERY_RSP: { + SQueryTableRsp *rsp = (SQueryTableRsp *)msg; + + SCH_ERR_JRET(rspCode); + if (NULL == msg) { + SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); + } + SCH_ERR_JRET(rsp->code); + + SCH_ERR_JRET(schSaveJobQueryRes(pJob, rsp)); + + taosMemoryFreeClear(msg); + + SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); + + break; + } + case TDMT_VND_EXPLAIN_RSP: { + SCH_ERR_JRET(rspCode); + if (NULL == msg) { + SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); + } + + if (!SCH_IS_EXPLAIN_JOB(pJob)) { + SCH_TASK_ELOG("invalid msg received for none explain query, msg type:%s", TMSG_INFO(msgType)); + SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); + } + + if (pJob->resData) { + SCH_TASK_ELOG("explain result is already generated, res:%p", pJob->resData); + SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR); + } + + SExplainRsp rsp = {0}; + if (tDeserializeSExplainRsp(msg, msgSize, &rsp)) { + taosMemoryFree(rsp.subplanInfo); + 
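/* [editorial sketch — not part of this patch] SCH_ERR_JRET(...) together with a
 * _return: label is the file-wide cleanup idiom: any failing call jumps to a single
 * label that releases whatever was acquired so far. A self-contained version of the
 * idiom (ERR_JRET and the resources are invented for illustration): */
#include <stdlib.h>

#define ERR_JRET(c) do { code = (c); if (0 != code) goto _return; } while (0)

static int buildBuffers(char **out) {
  int   code = 0;
  char *a = NULL;
  char *b = NULL;

  a = malloc(16);
  ERR_JRET(a ? 0 : -1);
  b = malloc(16);
  ERR_JRET(b ? 0 : -1);

  *out = a;     /* success: hand ownership of a to the caller */
  free(b);      /* b was only scratch space in this sketch */
  return 0;

_return:
  free(b);      /* free(NULL) is a no-op, so partial setup is safe */
  free(a);
  return code;
}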
SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + SRetrieveTableRsp *pRsp = NULL; + SCH_ERR_JRET(qExplainUpdateExecInfo(pJob->explainCtx, &rsp, pTask->plan->id.groupId, &pRsp)); + + if (pRsp) { + SCH_ERR_JRET(schProcessOnExplainDone(pJob, pTask, pRsp)); + } + break; + } + case TDMT_VND_FETCH_RSP: { + SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)msg; + + SCH_ERR_JRET(rspCode); + if (NULL == msg) { + SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); + } + + if (SCH_IS_EXPLAIN_JOB(pJob)) { + if (rsp->completed) { + SRetrieveTableRsp *pRsp = NULL; + SCH_ERR_JRET(qExecExplainEnd(pJob->explainCtx, &pRsp)); + if (pRsp) { + SCH_ERR_JRET(schProcessOnExplainDone(pJob, pTask, pRsp)); + } + + taosMemoryFreeClear(msg); + + return TSDB_CODE_SUCCESS; + } + + atomic_val_compare_exchange_32(&pJob->remoteFetch, 1, 0); + + SCH_ERR_JRET(schFetchFromRemote(pJob)); + + taosMemoryFreeClear(msg); + + return TSDB_CODE_SUCCESS; + } + + if (pJob->resData) { + SCH_TASK_ELOG("got fetch rsp while res already exists, res:%p", pJob->resData); + taosMemoryFreeClear(rsp); + SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR); + } + + atomic_store_ptr(&pJob->resData, rsp); + atomic_add_fetch_32(&pJob->resNumOfRows, htonl(rsp->numOfRows)); + + if (rsp->completed) { + SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_SUCCEED); + } + + SCH_TASK_DLOG("got fetch rsp, rows:%d, complete:%d", htonl(rsp->numOfRows), rsp->completed); + + msg = NULL; + + schProcessOnDataFetched(pJob); + break; + } + case TDMT_VND_DROP_TASK_RSP: { + // SHOULD NEVER REACH HERE + SCH_TASK_ELOG("invalid status to handle drop task rsp, refId:%" PRIx64, pJob->refId); + SCH_ERR_JRET(TSDB_CODE_SCH_INTERNAL_ERROR); + break; + } + case TDMT_SCH_LINK_BROKEN: + SCH_TASK_ELOG("link broken received, error:%x - %s", rspCode, tstrerror(rspCode)); + SCH_ERR_JRET(rspCode); + break; + default: + SCH_TASK_ELOG("unknown rsp msg, type:%d, status:%s", msgType, SCH_GET_TASK_STATUS_STR(pTask)); + SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); + } + + return TSDB_CODE_SUCCESS; + +_return: + + taosMemoryFreeClear(msg); + + SCH_RET(schProcessOnTaskFailure(pJob, pTask, code)); +} + + +int32_t schHandleCallback(void *param, const SDataBuf *pMsg, int32_t msgType, int32_t rspCode) { + int32_t code = 0; + SSchTaskCallbackParam *pParam = (SSchTaskCallbackParam *)param; + SSchTask *pTask = NULL; + + SSchJob *pJob = schAcquireJob(pParam->refId); + if (NULL == pJob) { + qWarn("QID:0x%" PRIx64 ",TID:0x%" PRIx64 "taosAcquireRef job failed, may be dropped, refId:%" PRIx64, + pParam->queryId, pParam->taskId, pParam->refId); + SCH_ERR_JRET(TSDB_CODE_QRY_JOB_FREED); + } + + schGetTaskFromTaskList(pJob->execTasks, pParam->taskId, &pTask); + if (NULL == pTask) { + if (TDMT_VND_EXPLAIN_RSP == msgType) { + schGetTaskFromTaskList(pJob->succTasks, pParam->taskId, &pTask); + } else { + SCH_JOB_ELOG("task not found in execTask list, refId:%" PRIx64 ", taskId:%" PRIx64, pParam->refId, + pParam->taskId); + SCH_ERR_JRET(TSDB_CODE_SCH_INTERNAL_ERROR); + } + } + + if (NULL == pTask) { + SCH_JOB_ELOG("task not found in execList & succList, refId:%" PRIx64 ", taskId:%" PRIx64, pParam->refId, + pParam->taskId); + SCH_ERR_JRET(TSDB_CODE_SCH_INTERNAL_ERROR); + } + + SCH_TASK_DLOG("rsp msg received, type:%s, handle:%p, code:%s", TMSG_INFO(msgType), pMsg->handle, tstrerror(rspCode)); + + SCH_SET_TASK_HANDLE(pTask, pMsg->handle); + schUpdateTaskExecNodeHandle(pTask, pMsg->handle, rspCode); + + SCH_ERR_JRET(schHandleResponseMsg(pJob, pTask, msgType, pMsg->pData, pMsg->len, rspCode)); + +_return: + + if (pJob) { + 
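/* [editorial sketch — not part of this patch] All callback work above is bracketed by
 * schAcquireJob()/schReleaseJob(), so a job freed by another thread is observed as a
 * NULL acquire rather than a dangling pointer. A minimal model of that discipline with
 * a plain atomic counter (taosRef additionally indirects through a registry of refIds,
 * which this sketch omits): */
#include <stdatomic.h>
#include <stdlib.h>

typedef struct { _Atomic int refs; /* job payload ... */ } Job;

static Job *jobAcquire(Job *j) {
  int r = atomic_load(&j->refs);
  while (r > 0) {                     /* never resurrect a job whose count hit zero */
    if (atomic_compare_exchange_weak(&j->refs, &r, r + 1)) return j;
  }
  return NULL;                        /* caller must handle "already dropped" */
}

static void jobRelease(Job *j) {
  if (1 == atomic_fetch_sub(&j->refs, 1)) free(j);  /* last reference frees */
}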
schReleaseJob(pParam->refId); + } + + taosMemoryFreeClear(param); + SCH_RET(code); +} + +int32_t schHandleSubmitCallback(void *param, const SDataBuf *pMsg, int32_t code) { + return schHandleCallback(param, pMsg, TDMT_VND_SUBMIT_RSP, code); +} + +int32_t schHandleCreateTbCallback(void *param, const SDataBuf *pMsg, int32_t code) { + return schHandleCallback(param, pMsg, TDMT_VND_CREATE_TABLE_RSP, code); +} + +int32_t schHandleDropTbCallback(void *param, const SDataBuf *pMsg, int32_t code) { + return schHandleCallback(param, pMsg, TDMT_VND_DROP_TABLE_RSP, code); +} + +int32_t schHandleAlterTbCallback(void *param, const SDataBuf *pMsg, int32_t code) { + return schHandleCallback(param, pMsg, TDMT_VND_ALTER_TABLE_RSP, code); +} + +int32_t schHandleQueryCallback(void *param, const SDataBuf *pMsg, int32_t code) { + return schHandleCallback(param, pMsg, TDMT_VND_QUERY_RSP, code); +} + +int32_t schHandleFetchCallback(void *param, const SDataBuf *pMsg, int32_t code) { + return schHandleCallback(param, pMsg, TDMT_VND_FETCH_RSP, code); +} + +int32_t schHandleExplainCallback(void *param, const SDataBuf *pMsg, int32_t code) { + return schHandleCallback(param, pMsg, TDMT_VND_EXPLAIN_RSP, code); +} + +int32_t schHandleDropCallback(void *param, const SDataBuf *pMsg, int32_t code) { + SSchTaskCallbackParam *pParam = (SSchTaskCallbackParam *)param; + qDebug("QID:%" PRIx64 ",TID:%" PRIx64 " drop task rsp received, code:%x", pParam->queryId, pParam->taskId, code); + taosMemoryFreeClear(param); + return TSDB_CODE_SUCCESS; +} + +int32_t schHandleLinkBrokenCallback(void *param, const SDataBuf *pMsg, int32_t code) { + SSchCallbackParamHeader *head = (SSchCallbackParamHeader *)param; + rpcReleaseHandle(pMsg->handle, TAOS_CONN_CLIENT); + + qDebug("handle %p is broken", pMsg->handle); + + if (head->isHbParam) { + SSchHbCallbackParam *hbParam = (SSchHbCallbackParam *)param; + SSchTrans trans = {.pTrans = hbParam->pTrans, .pHandle = NULL}; + SCH_ERR_RET(schUpdateHbConnection(&hbParam->nodeEpId, &trans)); + + SCH_ERR_RET(schBuildAndSendHbMsg(&hbParam->nodeEpId)); + } else { + SCH_ERR_RET(schHandleCallback(param, pMsg, TDMT_SCH_LINK_BROKEN, code)); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t schGenerateCallBackInfo(SSchJob *pJob, SSchTask *pTask, int32_t msgType, SMsgSendInfo **pMsgSendInfo) { + int32_t code = 0; + SMsgSendInfo *msgSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo)); + if (NULL == msgSendInfo) { + SCH_TASK_ELOG("calloc %d failed", (int32_t)sizeof(SMsgSendInfo)); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + SSchTaskCallbackParam *param = taosMemoryCalloc(1, sizeof(SSchTaskCallbackParam)); + if (NULL == param) { + SCH_TASK_ELOG("calloc %d failed", (int32_t)sizeof(SSchTaskCallbackParam)); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + __async_send_cb_fn_t fp = NULL; + SCH_ERR_JRET(schGetCallbackFp(msgType, &fp)); + + param->queryId = pJob->queryId; + param->refId = pJob->refId; + param->taskId = SCH_TASK_ID(pTask); + param->transport = pJob->pTrans; + + msgSendInfo->param = param; + msgSendInfo->fp = fp; + + *pMsgSendInfo = msgSendInfo; + + return TSDB_CODE_SUCCESS; + +_return: + + taosMemoryFree(param); + taosMemoryFree(msgSendInfo); + + SCH_RET(code); +} + + +int32_t schGetCallbackFp(int32_t msgType, __async_send_cb_fn_t *fp) { + switch (msgType) { + case TDMT_VND_CREATE_TABLE: + *fp = schHandleCreateTbCallback; + break; + case TDMT_VND_DROP_TABLE: + *fp = schHandleDropTbCallback; + break; + case TDMT_VND_ALTER_TABLE: + *fp = schHandleAlterTbCallback; + break; + case TDMT_VND_SUBMIT: + 
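/* [editorial note] This switch is the single request-type -> completion-callback
 * dispatch point: every message type schBuildAndSendMsg() can emit needs a case here,
 * and schGenerateCallBackInfo() resolves the callback before the request is sent, so
 * an unmapped type fails fast with TSDB_CODE_QRY_APP_ERROR instead of silently losing
 * its response. */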
*fp = schHandleSubmitCallback; + break; + case TDMT_VND_QUERY: + *fp = schHandleQueryCallback; + break; + case TDMT_VND_EXPLAIN: + *fp = schHandleExplainCallback; + break; + case TDMT_VND_FETCH: + *fp = schHandleFetchCallback; + break; + case TDMT_VND_DROP_TASK: + *fp = schHandleDropCallback; + break; + case TDMT_VND_QUERY_HEARTBEAT: + *fp = schHandleHbCallback; + break; + case TDMT_SCH_LINK_BROKEN: + *fp = schHandleLinkBrokenCallback; + break; + default: + qError("unknown msg type for callback, msgType:%d", msgType); + SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR); + } + + return TSDB_CODE_SUCCESS; +} + + +int32_t schMakeHbCallbackParam(SSchJob *pJob, SSchTask *pTask, void **pParam) { + SSchHbCallbackParam *param = taosMemoryCalloc(1, sizeof(SSchHbCallbackParam)); + if (NULL == param) { + SCH_TASK_ELOG("calloc %d failed", (int32_t)sizeof(SSchHbCallbackParam)); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + param->head.isHbParam = true; + + SQueryNodeAddr *addr = taosArrayGet(pTask->candidateAddrs, pTask->candidateIdx); + + param->nodeEpId.nodeId = addr->nodeId; + SEp* pEp = SCH_GET_CUR_EP(addr); + strcpy(param->nodeEpId.ep.fqdn, pEp->fqdn); + param->nodeEpId.ep.port = pEp->port; + param->pTrans = pJob->pTrans; + + *pParam = param; + + return TSDB_CODE_SUCCESS; +} + +int32_t schCloneHbRpcCtx(SRpcCtx *pSrc, SRpcCtx *pDst) { + int32_t code = 0; + memcpy(pDst, pSrc, sizeof(SRpcCtx)); + pDst->brokenVal.val = NULL; + pDst->args = NULL; + + SCH_ERR_RET(schCloneSMsgSendInfo(pSrc->brokenVal.val, &pDst->brokenVal.val)); + + pDst->args = taosHashInit(1, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_ENTRY_LOCK); + if (NULL == pDst->args) { + qError("taosHashInit %d RpcCtx failed", 1); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + SRpcCtxVal dst = {0}; + void *pIter = taosHashIterate(pSrc->args, NULL); + while (pIter) { + SRpcCtxVal *pVal = (SRpcCtxVal *)pIter; + int32_t *msgType = taosHashGetKey(pIter, NULL); + + dst = *pVal; + dst.val = NULL; + + SCH_ERR_JRET(schCloneSMsgSendInfo(pVal->val, &dst.val)); + + if (taosHashPut(pDst->args, msgType, sizeof(*msgType), &dst, sizeof(dst))) { + qError("taosHashPut msg %d to rpcCtx failed", *msgType); + (*pSrc->freeFunc)(dst.val); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + pIter = taosHashIterate(pSrc->args, pIter); + } + + return TSDB_CODE_SUCCESS; + +_return: + + schFreeRpcCtx(pDst); + SCH_RET(code); +} + + +int32_t schMakeHbRpcCtx(SSchJob *pJob, SSchTask *pTask, SRpcCtx *pCtx) { + int32_t code = 0; + SSchHbCallbackParam *param = NULL; + SMsgSendInfo *pMsgSendInfo = NULL; + SQueryNodeAddr *addr = taosArrayGet(pTask->candidateAddrs, pTask->candidateIdx); + SQueryNodeEpId epId = {0}; + + epId.nodeId = addr->nodeId; + memcpy(&epId.ep, SCH_GET_CUR_EP(addr), sizeof(SEp)); + + pCtx->args = taosHashInit(1, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_ENTRY_LOCK); + if (NULL == pCtx->args) { + SCH_TASK_ELOG("taosHashInit %d RpcCtx failed", 1); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + pMsgSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo)); + if (NULL == pMsgSendInfo) { + SCH_TASK_ELOG("calloc %d failed", (int32_t)sizeof(SMsgSendInfo)); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + param = taosMemoryCalloc(1, sizeof(SSchHbCallbackParam)); + if (NULL == param) { + SCH_TASK_ELOG("calloc %d failed", (int32_t)sizeof(SSchHbCallbackParam)); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + int32_t msgType = TDMT_VND_QUERY_HEARTBEAT_RSP; + __async_send_cb_fn_t fp = NULL; + 
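/* [editorial note] The heartbeat RpcCtx assembled here is what keeps a broken
 * connection recoverable: args maps the expected TDMT_VND_QUERY_HEARTBEAT_RSP type to
 * a cloneable SMsgSendInfo, brokenVal carries the callback fired when the link drops,
 * and freeFunc owns cleanup of both. schCloneHbRpcCtx() above deep-copies this context
 * for every outgoing hb message, which is why each stored value must provide a clone
 * function. */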
SCH_ERR_JRET(schGetCallbackFp(TDMT_VND_QUERY_HEARTBEAT, &fp)); + + param->nodeEpId = epId; + param->pTrans = pJob->pTrans; + + pMsgSendInfo->param = param; + pMsgSendInfo->fp = fp; + + SRpcCtxVal ctxVal = {.val = pMsgSendInfo, .clone = schCloneSMsgSendInfo}; + if (taosHashPut(pCtx->args, &msgType, sizeof(msgType), &ctxVal, sizeof(ctxVal))) { + SCH_TASK_ELOG("taosHashPut msg %d to rpcCtx failed", msgType); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + SCH_ERR_JRET(schMakeBrokenLinkVal(pJob, pTask, &pCtx->brokenVal, true)); + pCtx->freeFunc = schFreeRpcCtxVal; + + return TSDB_CODE_SUCCESS; + +_return: + + taosHashCleanup(pCtx->args); + taosMemoryFreeClear(param); + taosMemoryFreeClear(pMsgSendInfo); + + SCH_RET(code); +} + +int32_t schRegisterHbConnection(SSchJob *pJob, SSchTask *pTask, SQueryNodeEpId *epId, bool *exist) { + int32_t code = 0; + SSchHbTrans hb = {0}; + + hb.trans.pTrans = pJob->pTrans; + + SCH_ERR_RET(schMakeHbRpcCtx(pJob, pTask, &hb.rpcCtx)); + + code = taosHashPut(schMgmt.hbConnections, epId, sizeof(SQueryNodeEpId), &hb, sizeof(SSchHbTrans)); + if (code) { + schFreeRpcCtx(&hb.rpcCtx); + + if (HASH_NODE_EXIST(code)) { + *exist = true; + return TSDB_CODE_SUCCESS; + } + + qError("taosHashPut hb trans failed, nodeId:%d, fqdn:%s, port:%d", epId->nodeId, epId->ep.fqdn, epId->ep.port); + SCH_ERR_RET(code); + } + + return TSDB_CODE_SUCCESS; +} + + +int32_t schBuildAndSendHbMsg(SQueryNodeEpId *nodeEpId) { + SSchedulerHbReq req = {0}; + int32_t code = 0; + SRpcCtx rpcCtx = {0}; + SSchTrans trans = {0}; + int32_t msgType = TDMT_VND_QUERY_HEARTBEAT; + + req.header.vgId = nodeEpId->nodeId; + req.sId = schMgmt.sId; + memcpy(&req.epId, nodeEpId, sizeof(SQueryNodeEpId)); + + SSchHbTrans *hb = taosHashGet(schMgmt.hbConnections, nodeEpId, sizeof(SQueryNodeEpId)); + if (NULL == hb) { + qError("taosHashGet hb connection failed, nodeId:%d, fqdn:%s, port:%d", nodeEpId->nodeId, nodeEpId->ep.fqdn, + nodeEpId->ep.port); + SCH_ERR_RET(code); + } + + SCH_LOCK(SCH_WRITE, &hb->lock); + code = schCloneHbRpcCtx(&hb->rpcCtx, &rpcCtx); + memcpy(&trans, &hb->trans, sizeof(trans)); + SCH_UNLOCK(SCH_WRITE, &hb->lock); + + SCH_ERR_RET(code); + + int32_t msgSize = tSerializeSSchedulerHbReq(NULL, 0, &req); + if (msgSize < 0) { + qError("tSerializeSSchedulerHbReq hbReq failed, size:%d", msgSize); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + void *msg = taosMemoryCalloc(1, msgSize); + if (NULL == msg) { + qError("calloc hb req %d failed", msgSize); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + if (tSerializeSSchedulerHbReq(msg, msgSize, &req) < 0) { + qError("tSerializeSSchedulerHbReq hbReq failed, size:%d", msgSize); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + SMsgSendInfo *pMsgSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo)); + if (NULL == pMsgSendInfo) { + qError("calloc SMsgSendInfo failed"); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + SSchTaskCallbackParam *param = taosMemoryCalloc(1, sizeof(SSchTaskCallbackParam)); + if (NULL == param) { + qError("calloc SSchTaskCallbackParam failed"); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + __async_send_cb_fn_t fp = NULL; + SCH_ERR_JRET(schGetCallbackFp(msgType, &fp)); + + param->transport = trans.pTrans; + + pMsgSendInfo->param = param; + pMsgSendInfo->msgInfo.pData = msg; + pMsgSendInfo->msgInfo.len = msgSize; + pMsgSendInfo->msgInfo.handle = trans.pHandle; + pMsgSendInfo->msgType = msgType; + pMsgSendInfo->fp = fp; + + int64_t transporterId = 0; + SEpSet epSet = {.inUse = 0, .numOfEps = 1}; + memcpy(&epSet.eps[0], 
&nodeEpId->ep, sizeof(nodeEpId->ep)); + + qDebug("start to send hb msg, pTrans:%p, pHandle:%p, fqdn:%s, port:%d", trans.pTrans, trans.pHandle, + nodeEpId->ep.fqdn, nodeEpId->ep.port); + + code = asyncSendMsgToServerExt(trans.pTrans, &epSet, &transporterId, pMsgSendInfo, true, &rpcCtx); + if (code) { + qError("fail to send hb msg, pTrans:%p, pHandle:%p, fqdn:%s, port:%d, error:%x - %s", trans.pTrans, + trans.pHandle, nodeEpId->ep.fqdn, nodeEpId->ep.port, code, tstrerror(code)); + SCH_ERR_JRET(code); + } + + qDebug("hb msg sent"); + return TSDB_CODE_SUCCESS; + +_return: + + taosMemoryFreeClear(msg); + taosMemoryFreeClear(param); + taosMemoryFreeClear(pMsgSendInfo); + schFreeRpcCtx(&rpcCtx); + SCH_RET(code); +} + + +int32_t schEnsureHbConnection(SSchJob *pJob, SSchTask *pTask) { + SQueryNodeAddr *addr = taosArrayGet(pTask->candidateAddrs, pTask->candidateIdx); + SQueryNodeEpId epId = {0}; + + epId.nodeId = addr->nodeId; + + SEp* pEp = SCH_GET_CUR_EP(addr); + strcpy(epId.ep.fqdn, pEp->fqdn); + epId.ep.port = pEp->port; + + SSchHbTrans *hb = taosHashGet(schMgmt.hbConnections, &epId, sizeof(SQueryNodeEpId)); + if (NULL == hb) { + bool exist = false; + SCH_ERR_RET(schRegisterHbConnection(pJob, pTask, &epId, &exist)); + if (!exist) { + SCH_ERR_RET(schBuildAndSendHbMsg(&epId)); + } + } + + return TSDB_CODE_SUCCESS; +} + +int32_t schUpdateHbConnection(SQueryNodeEpId *epId, SSchTrans *trans) { + int32_t code = 0; + SSchHbTrans *hb = NULL; + + hb = taosHashGet(schMgmt.hbConnections, epId, sizeof(SQueryNodeEpId)); + if (NULL == hb) { + qError("taosHashGet hb connection failed, nodeId:%d, fqdn:%s, port:%d", epId->nodeId, epId->ep.fqdn, epId->ep.port); + SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR); + } + + SCH_LOCK(SCH_WRITE, &hb->lock); + memcpy(&hb->trans, trans, sizeof(*trans)); + SCH_UNLOCK(SCH_WRITE, &hb->lock); + + qDebug("hb connection updated, sId:%" PRIx64 ", nodeId:%d, fqdn:%s, port:%d, pTrans:%p, pHandle:%p", schMgmt.sId, + epId->nodeId, epId->ep.fqdn, epId->ep.port, trans->pTrans, trans->pHandle); + + return TSDB_CODE_SUCCESS; +} + +int32_t schHandleHbCallback(void *param, const SDataBuf *pMsg, int32_t code) { + SSchedulerHbRsp rsp = {0}; + SSchTaskCallbackParam *pParam = (SSchTaskCallbackParam *)param; + + if (code) { + qError("hb rsp error:%s", tstrerror(code)); + SCH_ERR_JRET(code); + } + + if (tDeserializeSSchedulerHbRsp(pMsg->pData, pMsg->len, &rsp)) { + qError("invalid hb rsp msg, size:%d", pMsg->len); + SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); + } + + SSchTrans trans = {0}; + trans.pTrans = pParam->transport; + trans.pHandle = pMsg->handle; + + SCH_ERR_JRET(schUpdateHbConnection(&rsp.epId, &trans)); + + int32_t taskNum = (int32_t)taosArrayGetSize(rsp.taskStatus); + qDebug("%d task status in hb rsp, nodeId:%d, fqdn:%s, port:%d", taskNum, rsp.epId.nodeId, rsp.epId.ep.fqdn, + rsp.epId.ep.port); + + for (int32_t i = 0; i < taskNum; ++i) { + STaskStatus *taskStatus = taosArrayGet(rsp.taskStatus, i); + + SSchJob *pJob = schAcquireJob(taskStatus->refId); + if (NULL == pJob) { + qWarn("job not found, refId:0x%" PRIx64 ",QID:0x%" PRIx64 ",TID:0x%" PRIx64, taskStatus->refId, + taskStatus->queryId, taskStatus->taskId); + // TODO DROP TASK FROM SERVER!!!! 
+ continue; + } + + // TODO + + SCH_JOB_DLOG("TID:0x%" PRIx64 " task status in server: %s", taskStatus->taskId, + jobTaskStatusStr(taskStatus->status)); + + schReleaseJob(taskStatus->refId); + } + +_return: + + tFreeSSchedulerHbRsp(&rsp); + taosMemoryFree(param); + + SCH_RET(code); +} + +int32_t schMakeCallbackParam(SSchJob *pJob, SSchTask *pTask, void **pParam) { + SSchTaskCallbackParam *param = taosMemoryCalloc(1, sizeof(SSchTaskCallbackParam)); + if (NULL == param) { + SCH_TASK_ELOG("calloc %d failed", (int32_t)sizeof(SSchTaskCallbackParam)); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + param->queryId = pJob->queryId; + param->refId = pJob->refId; + param->taskId = SCH_TASK_ID(pTask); + param->transport = pJob->pTrans; + + *pParam = param; + + return TSDB_CODE_SUCCESS; +} + +int32_t schMakeBrokenLinkVal(SSchJob *pJob, SSchTask *pTask, SRpcBrokenlinkVal *brokenVal, bool isHb) { + int32_t code = 0; + SMsgSendInfo *pMsgSendInfo = NULL; + + pMsgSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo)); + if (NULL == pMsgSendInfo) { + SCH_TASK_ELOG("calloc %d failed", (int32_t)sizeof(SMsgSendInfo)); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + if (isHb) { + SCH_ERR_JRET(schMakeHbCallbackParam(pJob, pTask, &pMsgSendInfo->param)); + } else { + SCH_ERR_JRET(schMakeCallbackParam(pJob, pTask, &pMsgSendInfo->param)); + } + + int32_t msgType = TDMT_SCH_LINK_BROKEN; + __async_send_cb_fn_t fp = NULL; + SCH_ERR_JRET(schGetCallbackFp(msgType, &fp)); + + pMsgSendInfo->fp = fp; + + brokenVal->msgType = msgType; + brokenVal->val = pMsgSendInfo; + brokenVal->clone = schCloneSMsgSendInfo; + + return TSDB_CODE_SUCCESS; + +_return: + + taosMemoryFreeClear(pMsgSendInfo->param); + taosMemoryFreeClear(pMsgSendInfo); + + SCH_RET(code); +} + +int32_t schMakeQueryRpcCtx(SSchJob *pJob, SSchTask *pTask, SRpcCtx *pCtx) { + int32_t code = 0; + SMsgSendInfo *pExplainMsgSendInfo = NULL; + + pCtx->args = taosHashInit(1, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_ENTRY_LOCK); + if (NULL == pCtx->args) { + SCH_TASK_ELOG("taosHashInit %d RpcCtx failed", 1); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + SCH_ERR_JRET(schGenerateCallBackInfo(pJob, pTask, TDMT_VND_EXPLAIN, &pExplainMsgSendInfo)); + + int32_t msgType = TDMT_VND_EXPLAIN_RSP; + SRpcCtxVal ctxVal = {.val = pExplainMsgSendInfo, .clone = schCloneSMsgSendInfo}; + if (taosHashPut(pCtx->args, &msgType, sizeof(msgType), &ctxVal, sizeof(ctxVal))) { + SCH_TASK_ELOG("taosHashPut msg %d to rpcCtx failed", msgType); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + SCH_ERR_JRET(schMakeBrokenLinkVal(pJob, pTask, &pCtx->brokenVal, false)); + pCtx->freeFunc = schFreeRpcCtxVal; + + return TSDB_CODE_SUCCESS; + +_return: + + taosHashCleanup(pCtx->args); + + if (pExplainMsgSendInfo) { + taosMemoryFreeClear(pExplainMsgSendInfo->param); + taosMemoryFreeClear(pExplainMsgSendInfo); + } + + SCH_RET(code); +} + +int32_t schCloneCallbackParam(SSchCallbackParamHeader *pSrc, SSchCallbackParamHeader **pDst) { + if (pSrc->isHbParam) { + SSchHbCallbackParam *dst = taosMemoryMalloc(sizeof(SSchHbCallbackParam)); + if (NULL == dst) { + qError("malloc SSchHbCallbackParam failed"); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + memcpy(dst, pSrc, sizeof(*dst)); + *pDst = (SSchCallbackParamHeader *)dst; + + return TSDB_CODE_SUCCESS; + } + + SSchTaskCallbackParam *dst = taosMemoryMalloc(sizeof(SSchTaskCallbackParam)); + if (NULL == dst) { + qError("malloc SSchTaskCallbackParam failed"); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + memcpy(dst, pSrc, 
sizeof(*dst)); + *pDst = (SSchCallbackParamHeader *)dst; + + return TSDB_CODE_SUCCESS; +} + +int32_t schCloneSMsgSendInfo(void *src, void **dst) { + SMsgSendInfo *pSrc = src; + int32_t code = 0; + SMsgSendInfo *pDst = taosMemoryMalloc(sizeof(*pSrc)); + if (NULL == pDst) { + qError("malloc SMsgSendInfo for rpcCtx failed, len:%d", (int32_t)sizeof(*pSrc)); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + memcpy(pDst, pSrc, sizeof(*pSrc)); + pDst->param = NULL; + + SCH_ERR_JRET(schCloneCallbackParam(pSrc->param, (SSchCallbackParamHeader **)&pDst->param)); + + *dst = pDst; + + return TSDB_CODE_SUCCESS; + +_return: + + taosMemoryFreeClear(pDst); + SCH_RET(code); +} + + +int32_t schAsyncSendMsg(SSchJob *pJob, SSchTask *pTask, void *transport, SEpSet *epSet, int32_t msgType, void *msg, + uint32_t msgSize, bool persistHandle, SRpcCtx *ctx) { + int32_t code = 0; + + SSchTrans *trans = (SSchTrans *)transport; + + SMsgSendInfo *pMsgSendInfo = NULL; + SCH_ERR_JRET(schGenerateCallBackInfo(pJob, pTask, msgType, &pMsgSendInfo)); + + pMsgSendInfo->msgInfo.pData = msg; + pMsgSendInfo->msgInfo.len = msgSize; + pMsgSendInfo->msgInfo.handle = trans->pHandle; + pMsgSendInfo->msgType = msgType; + + qDebug("start to send %s msg to node[%d,%s,%d], refId:%" PRIx64 "pTrans:%p, pHandle:%p", TMSG_INFO(msgType), + ntohl(((SMsgHead *)msg)->vgId), epSet->eps[epSet->inUse].fqdn, epSet->eps[epSet->inUse].port, pJob->refId, + trans->pTrans, trans->pHandle); + + int64_t transporterId = 0; + code = asyncSendMsgToServerExt(trans->pTrans, epSet, &transporterId, pMsgSendInfo, persistHandle, ctx); + if (code) { + SCH_ERR_JRET(code); + } + + SCH_TASK_DLOG("req msg sent, refId:%" PRIx64 ", type:%d, %s", pJob->refId, msgType, TMSG_INFO(msgType)); + return TSDB_CODE_SUCCESS; + +_return: + + if (pMsgSendInfo) { + taosMemoryFreeClear(pMsgSendInfo->param); + taosMemoryFreeClear(pMsgSendInfo); + } + + SCH_RET(code); +} + +int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr, int32_t msgType) { + uint32_t msgSize = 0; + void *msg = NULL; + int32_t code = 0; + bool isCandidateAddr = false; + bool persistHandle = false; + SRpcCtx rpcCtx = {0}; + + if (NULL == addr) { + addr = taosArrayGet(pTask->candidateAddrs, pTask->candidateIdx); + isCandidateAddr = true; + } + + SEpSet epSet = addr->epSet; + + switch (msgType) { + case TDMT_VND_CREATE_TABLE: + case TDMT_VND_DROP_TABLE: + case TDMT_VND_ALTER_TABLE: + case TDMT_VND_SUBMIT: { + msgSize = pTask->msgLen; + msg = taosMemoryCalloc(1, msgSize); + if (NULL == msg) { + SCH_TASK_ELOG("calloc %d failed", msgSize); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + memcpy(msg, pTask->msg, msgSize); + break; + } + + case TDMT_VND_QUERY: { + SCH_ERR_RET(schMakeQueryRpcCtx(pJob, pTask, &rpcCtx)); + + uint32_t len = strlen(pJob->sql); + msgSize = sizeof(SSubQueryMsg) + pTask->msgLen + len; + msg = taosMemoryCalloc(1, msgSize); + if (NULL == msg) { + SCH_TASK_ELOG("calloc %d failed", msgSize); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + SSubQueryMsg *pMsg = msg; + pMsg->header.vgId = htonl(addr->nodeId); + pMsg->sId = htobe64(schMgmt.sId); + pMsg->queryId = htobe64(pJob->queryId); + pMsg->taskId = htobe64(pTask->taskId); + pMsg->refId = htobe64(pJob->refId); + pMsg->taskType = TASK_TYPE_TEMP; + pMsg->explain = SCH_IS_EXPLAIN_JOB(pJob); + pMsg->phyLen = htonl(pTask->msgLen); + pMsg->sqlLen = htonl(len); + + memcpy(pMsg->msg, pJob->sql, len); + memcpy(pMsg->msg + len, pTask->msg, pTask->msgLen); + + persistHandle = true; + break; + } + case TDMT_VND_FETCH: { + msgSize 
= sizeof(SResFetchReq); + msg = taosMemoryCalloc(1, msgSize); + if (NULL == msg) { + SCH_TASK_ELOG("calloc %d failed", msgSize); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + SResFetchReq *pMsg = msg; + + pMsg->header.vgId = htonl(addr->nodeId); + + pMsg->sId = htobe64(schMgmt.sId); + pMsg->queryId = htobe64(pJob->queryId); + pMsg->taskId = htobe64(pTask->taskId); + + break; + } + case TDMT_VND_DROP_TASK: { + msgSize = sizeof(STaskDropReq); + msg = taosMemoryCalloc(1, msgSize); + if (NULL == msg) { + SCH_TASK_ELOG("calloc %d failed", msgSize); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + STaskDropReq *pMsg = msg; + + pMsg->header.vgId = htonl(addr->nodeId); + + pMsg->sId = htobe64(schMgmt.sId); + pMsg->queryId = htobe64(pJob->queryId); + pMsg->taskId = htobe64(pTask->taskId); + pMsg->refId = htobe64(pJob->refId); + break; + } + case TDMT_VND_QUERY_HEARTBEAT: { + SCH_ERR_RET(schMakeHbRpcCtx(pJob, pTask, &rpcCtx)); + + SSchedulerHbReq req = {0}; + req.sId = schMgmt.sId; + req.header.vgId = addr->nodeId; + req.epId.nodeId = addr->nodeId; + memcpy(&req.epId.ep, SCH_GET_CUR_EP(addr), sizeof(SEp)); + + msgSize = tSerializeSSchedulerHbReq(NULL, 0, &req); + if (msgSize < 0) { + SCH_JOB_ELOG("tSerializeSSchedulerHbReq hbReq failed, size:%d", msgSize); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + msg = taosMemoryCalloc(1, msgSize); + if (NULL == msg) { + SCH_JOB_ELOG("calloc %d failed", msgSize); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + if (tSerializeSSchedulerHbReq(msg, msgSize, &req) < 0) { + SCH_JOB_ELOG("tSerializeSSchedulerHbReq hbReq failed, size:%d", msgSize); + SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + persistHandle = true; + break; + } + default: + SCH_TASK_ELOG("unknown msg type to send, msgType:%d", msgType); + SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); + break; + } + + SCH_SET_TASK_LASTMSG_TYPE(pTask, msgType); + + SSchTrans trans = {.pTrans = pJob->pTrans, .pHandle = SCH_GET_TASK_HANDLE(pTask)}; + SCH_ERR_JRET(schAsyncSendMsg(pJob, pTask, &trans, &epSet, msgType, msg, msgSize, persistHandle, + (rpcCtx.args ? &rpcCtx : NULL))); + + if (msgType == TDMT_VND_QUERY) { + SCH_ERR_RET(schRecordTaskExecNode(pJob, pTask, addr, trans.pHandle)); + } + + return TSDB_CODE_SUCCESS; + +_return: + + SCH_SET_TASK_LASTMSG_TYPE(pTask, -1); + schFreeRpcCtx(&rpcCtx); + + taosMemoryFreeClear(msg); + SCH_RET(code); +} + + + diff --git a/source/libs/scheduler/src/schUtil.c b/source/libs/scheduler/src/schUtil.c new file mode 100644 index 0000000000000000000000000000000000000000..81c95ea976e0c685fa1585df6dbb42bed75fd0c8 --- /dev/null +++ b/source/libs/scheduler/src/schUtil.c @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "catalog.h" +#include "command.h" +#include "query.h" +#include "schedulerInt.h" +#include "tmsg.h" +#include "tref.h" +#include "trpc.h" + +void schCloseJobRef(void) { + if (!atomic_load_8((int8_t *)&schMgmt.exit)) { + return; + } + + SCH_LOCK(SCH_WRITE, &schMgmt.lock); + if (atomic_load_32(&schMgmt.jobNum) <= 0 && schMgmt.jobRef >= 0) { + taosCloseRef(schMgmt.jobRef); + schMgmt.jobRef = -1; + } + SCH_UNLOCK(SCH_WRITE, &schMgmt.lock); +} + +uint64_t schGenTaskId(void) { return atomic_add_fetch_64(&schMgmt.taskId, 1); } + +uint64_t schGenUUID(void) { + static uint64_t hashId = 0; + static int32_t requestSerialId = 0; + + if (hashId == 0) { + char uid[64] = {0}; + int32_t code = taosGetSystemUUID(uid, tListLen(uid)); + if (code != TSDB_CODE_SUCCESS) { + qError("Failed to get the system uid, reason:%s", tstrerror(TAOS_SYSTEM_ERROR(errno))); + } else { + hashId = MurmurHash3_32(uid, strlen(uid)); + } + } + + int64_t ts = taosGetTimestampMs(); + uint64_t pid = taosGetPId(); + int32_t val = atomic_add_fetch_32(&requestSerialId, 1); + + uint64_t id = ((hashId & 0x0FFF) << 52) | ((pid & 0x0FFF) << 40) | ((ts & 0xFFFFFF) << 16) | (val & 0xFFFF); + return id; +} + + +void schFreeRpcCtxVal(const void *arg) { + if (NULL == arg) { + return; + } + + SMsgSendInfo *pMsgSendInfo = (SMsgSendInfo *)arg; + taosMemoryFreeClear(pMsgSendInfo->param); + taosMemoryFreeClear(pMsgSendInfo->msgInfo.pData); + taosMemoryFreeClear(pMsgSendInfo); +} + +void schFreeRpcCtx(SRpcCtx *pCtx) { + if (NULL == pCtx) { + return; + } + void *pIter = taosHashIterate(pCtx->args, NULL); + while (pIter) { + SRpcCtxVal *ctxVal = (SRpcCtxVal *)pIter; + + (*pCtx->freeFunc)(ctxVal->val); + + pIter = taosHashIterate(pCtx->args, pIter); + } + + taosHashCleanup(pCtx->args); + + (*pCtx->freeFunc)(pCtx->brokenVal.val); +} + + diff --git a/source/libs/scheduler/src/scheduler.c b/source/libs/scheduler/src/scheduler.c index dcd87557aa62101a8cceae9f0f191fe2cae53e3b..3ecc4f4a301fa3a36b17a1d920bcf1c6352507b1 100644 --- a/source/libs/scheduler/src/scheduler.c +++ b/source/libs/scheduler/src/scheduler.c @@ -25,2498 +25,6 @@ SSchedulerMgmt schMgmt = { .jobRef = -1, }; -FORCE_INLINE SSchJob *schAcquireJob(int64_t refId) { return (SSchJob *)taosAcquireRef(schMgmt.jobRef, refId); } - -FORCE_INLINE int32_t schReleaseJob(int64_t refId) { return taosReleaseRef(schMgmt.jobRef, refId); } - -uint64_t schGenTaskId(void) { return atomic_add_fetch_64(&schMgmt.taskId, 1); } - -#if 0 -uint64_t schGenUUID(void) { - static uint64_t hashId = 0; - static int32_t requestSerialId = 0; - - if (hashId == 0) { - char uid[64]; - int32_t code = taosGetSystemUUID(uid, tListLen(uid)); - if (code != TSDB_CODE_SUCCESS) { - qError("Failed to get the system uid, reason:%s", tstrerror(TAOS_SYSTEM_ERROR(errno))); - } else { - hashId = MurmurHash3_32(uid, strlen(uid)); - } - } - - int64_t ts = taosGetTimestampMs(); - uint64_t pid = taosGetPId(); - int32_t val = atomic_add_fetch_32(&requestSerialId, 1); - - uint64_t id = ((hashId & 0x0FFF) << 52) | ((pid & 0x0FFF) << 40) | ((ts & 0xFFFFFF) << 16) | (val & 0xFFFF); - return id; -} -#endif - -int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *pLevel) { - pTask->plan = pPlan; - pTask->level = pLevel; - SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_NOT_START); - pTask->taskId = schGenTaskId(); - pTask->execNodes = taosArrayInit(SCH_MAX_CANDIDATE_EP_NUM, sizeof(SSchNodeInfo)); - if (NULL == pTask->execNodes) { - SCH_TASK_ELOG("taosArrayInit %d execNodes failed", SCH_MAX_CANDIDATE_EP_NUM); - 
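/*
 * A minimal standalone sketch of the 64-bit ID layout used by the schGenUUID
 * routine above; pack_uuid and every other name below are illustrative
 * stand-ins, not TDengine APIs. Assuming the layout read off the diff:
 * 12-bit host-UUID hash | 12-bit pid | 24-bit ms timestamp | 16-bit serial.
 * Note the 24-bit timestamp field wraps after roughly 4.6 hours of
 * milliseconds, so uniqueness ultimately rests on the pid and serial fields.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t pack_uuid(uint64_t hashId, uint64_t pid, int64_t ts, uint32_t serial) {
  return ((hashId & 0x0FFFULL) << 52) |          /* bits 63..52: host hash  */
         ((pid & 0x0FFFULL) << 40) |             /* bits 51..40: process id */
         (((uint64_t)ts & 0xFFFFFFULL) << 16) |  /* bits 39..16: ms clock   */
         (serial & 0xFFFFULL);                   /* bits 15..0 : serial     */
}

int main(void) {
  printf("%016llx\n", (unsigned long long)pack_uuid(0xABC, 0x123, 0x456789, 42));
  return 0;
}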
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - return TSDB_CODE_SUCCESS; -} - -int32_t schInitJob(SSchJob **pSchJob, SQueryPlan *pDag, void *transport, SArray *pNodeList, const char *sql, - int64_t startTs, bool syncSchedule) { - int32_t code = 0; - int64_t refId = -1; - SSchJob *pJob = taosMemoryCalloc(1, sizeof(SSchJob)); - if (NULL == pJob) { - qError("QID:%" PRIx64 " calloc %d failed", pDag->queryId, (int32_t)sizeof(SSchJob)); - SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - pJob->attr.explainMode = pDag->explainInfo.mode; - pJob->attr.syncSchedule = syncSchedule; - pJob->transport = transport; - pJob->sql = sql; - - if (pNodeList != NULL) { - pJob->nodeList = taosArrayDup(pNodeList); - } - - SCH_ERR_JRET(schValidateAndBuildJob(pDag, pJob)); - - if (SCH_IS_EXPLAIN_JOB(pJob)) { - SCH_ERR_JRET(qExecExplainBegin(pDag, &pJob->explainCtx, startTs)); - } - - pJob->execTasks = - taosHashInit(pDag->numOfSubplans, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_ENTRY_LOCK); - if (NULL == pJob->execTasks) { - SCH_JOB_ELOG("taosHashInit %d execTasks failed", pDag->numOfSubplans); - SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - pJob->succTasks = - taosHashInit(pDag->numOfSubplans, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_ENTRY_LOCK); - if (NULL == pJob->succTasks) { - SCH_JOB_ELOG("taosHashInit %d succTasks failed", pDag->numOfSubplans); - SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - pJob->failTasks = - taosHashInit(pDag->numOfSubplans, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_ENTRY_LOCK); - if (NULL == pJob->failTasks) { - SCH_JOB_ELOG("taosHashInit %d failTasks failed", pDag->numOfSubplans); - SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - tsem_init(&pJob->rspSem, 0, 0); - - refId = taosAddRef(schMgmt.jobRef, pJob); - if (refId < 0) { - SCH_JOB_ELOG("taosAddRef job failed, error:%s", tstrerror(terrno)); - SCH_ERR_JRET(terrno); - } - - atomic_add_fetch_32(&schMgmt.jobNum, 1); - - if (NULL == schAcquireJob(refId)) { - SCH_JOB_ELOG("schAcquireJob job failed, refId:%" PRIx64, refId); - SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR); - } - - pJob->refId = refId; - - SCH_JOB_DLOG("job refId:%" PRIx64, pJob->refId); - - pJob->status = JOB_TASK_STATUS_NOT_START; - - *pSchJob = pJob; - - return TSDB_CODE_SUCCESS; - -_return: - - if (refId < 0) { - schFreeJobImpl(pJob); - } else { - taosRemoveRef(schMgmt.jobRef, refId); - } - SCH_RET(code); -} - -void schFreeRpcCtx(SRpcCtx *pCtx) { - if (NULL == pCtx) { - return; - } - void *pIter = taosHashIterate(pCtx->args, NULL); - while (pIter) { - SRpcCtxVal *ctxVal = (SRpcCtxVal *)pIter; - - (*ctxVal->freeFunc)(ctxVal->val); - - pIter = taosHashIterate(pCtx->args, pIter); - } - - taosHashCleanup(pCtx->args); - - if (pCtx->brokenVal.freeFunc) { - (*pCtx->brokenVal.freeFunc)(pCtx->brokenVal.val); - } -} - -void schFreeTask(SSchTask *pTask) { - if (pTask->candidateAddrs) { - taosArrayDestroy(pTask->candidateAddrs); - } - - taosMemoryFreeClear(pTask->msg); - - if (pTask->children) { - taosArrayDestroy(pTask->children); - } - - if (pTask->parents) { - taosArrayDestroy(pTask->parents); - } - - if (pTask->execNodes) { - taosArrayDestroy(pTask->execNodes); - } -} - -static FORCE_INLINE bool schJobNeedToStop(SSchJob *pJob, int8_t *pStatus) { - int8_t status = SCH_GET_JOB_STATUS(pJob); - if (pStatus) { - *pStatus = status; - } - - return (status == JOB_TASK_STATUS_FAILED || status == JOB_TASK_STATUS_CANCELLED || - status == JOB_TASK_STATUS_CANCELLING || status == JOB_TASK_STATUS_DROPPING || - status 
== JOB_TASK_STATUS_SUCCEED); -} - -int32_t schValidateTaskReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t msgType) { - int32_t lastMsgType = SCH_GET_TASK_LASTMSG_TYPE(pTask); - int32_t taskStatus = SCH_GET_TASK_STATUS(pTask); - int32_t reqMsgType = msgType - 1; - switch (msgType) { - case TDMT_SCH_LINK_BROKEN: - case TDMT_VND_EXPLAIN_RSP: - return TSDB_CODE_SUCCESS; - case TDMT_VND_QUERY_RSP: // query_rsp may be processed later than ready_rsp - if (lastMsgType != reqMsgType && -1 != lastMsgType && TDMT_VND_FETCH != lastMsgType) { - SCH_TASK_DLOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", TMSG_INFO(lastMsgType), - TMSG_INFO(msgType)); - } - - if (taskStatus != JOB_TASK_STATUS_EXECUTING && taskStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) { - SCH_TASK_DLOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus), - TMSG_INFO(msgType)); - } - - SCH_SET_TASK_LASTMSG_TYPE(pTask, -1); - return TSDB_CODE_SUCCESS; - case TDMT_VND_RES_READY_RSP: - reqMsgType = TDMT_VND_QUERY; - if (lastMsgType != reqMsgType && -1 != lastMsgType) { - SCH_TASK_ELOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", - (lastMsgType > 0 ? TMSG_INFO(lastMsgType) : "null"), TMSG_INFO(msgType)); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); - } - - if (taskStatus != JOB_TASK_STATUS_EXECUTING && taskStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) { - SCH_TASK_ELOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus), - TMSG_INFO(msgType)); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); - } - - SCH_SET_TASK_LASTMSG_TYPE(pTask, -1); - return TSDB_CODE_SUCCESS; - case TDMT_VND_FETCH_RSP: - if (lastMsgType != reqMsgType && -1 != lastMsgType) { - SCH_TASK_ELOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", TMSG_INFO(lastMsgType), - TMSG_INFO(msgType)); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); - } - - if (taskStatus != JOB_TASK_STATUS_EXECUTING && taskStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) { - SCH_TASK_ELOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus), - TMSG_INFO(msgType)); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); - } - - SCH_SET_TASK_LASTMSG_TYPE(pTask, -1); - return TSDB_CODE_SUCCESS; - case TDMT_VND_CREATE_TABLE_RSP: - case TDMT_VND_DROP_TABLE_RSP: - case TDMT_VND_ALTER_TABLE_RSP: - case TDMT_VND_SUBMIT_RSP: - break; - default: - SCH_TASK_ELOG("unknown rsp msg, type:%s, status:%s", TMSG_INFO(msgType), jobTaskStatusStr(taskStatus)); - SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); - } - - if (lastMsgType != reqMsgType) { - SCH_TASK_ELOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", TMSG_INFO(lastMsgType), - TMSG_INFO(msgType)); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); - } - - if (taskStatus != JOB_TASK_STATUS_EXECUTING && taskStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) { - SCH_TASK_ELOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus), - TMSG_INFO(msgType)); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); - } - - SCH_SET_TASK_LASTMSG_TYPE(pTask, -1); - - return TSDB_CODE_SUCCESS; -} - -int32_t schCheckAndUpdateJobStatus(SSchJob *pJob, int8_t newStatus) { - int32_t code = 0; - - int8_t oriStatus = 0; - - while (true) { - oriStatus = SCH_GET_JOB_STATUS(pJob); - - if (oriStatus == newStatus) { - SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - switch (oriStatus) { - case JOB_TASK_STATUS_NULL: - if (newStatus != JOB_TASK_STATUS_NOT_START) { - SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - break; - case JOB_TASK_STATUS_NOT_START: - 
if (newStatus != JOB_TASK_STATUS_EXECUTING) { - SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - break; - case JOB_TASK_STATUS_EXECUTING: - if (newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED && newStatus != JOB_TASK_STATUS_FAILED && - newStatus != JOB_TASK_STATUS_CANCELLING && newStatus != JOB_TASK_STATUS_CANCELLED && - newStatus != JOB_TASK_STATUS_DROPPING) { - SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - break; - case JOB_TASK_STATUS_PARTIAL_SUCCEED: - if (newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_SUCCEED && - newStatus != JOB_TASK_STATUS_DROPPING) { - SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - break; - case JOB_TASK_STATUS_SUCCEED: - case JOB_TASK_STATUS_FAILED: - case JOB_TASK_STATUS_CANCELLING: - if (newStatus != JOB_TASK_STATUS_DROPPING) { - SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - break; - case JOB_TASK_STATUS_CANCELLED: - case JOB_TASK_STATUS_DROPPING: - SCH_ERR_JRET(TSDB_CODE_QRY_JOB_FREED); - break; - - default: - SCH_JOB_ELOG("invalid job status:%s", jobTaskStatusStr(oriStatus)); - SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - if (oriStatus != atomic_val_compare_exchange_8(&pJob->status, oriStatus, newStatus)) { - continue; - } - - SCH_JOB_DLOG("job status updated from %s to %s", jobTaskStatusStr(oriStatus), jobTaskStatusStr(newStatus)); - - break; - } - - return TSDB_CODE_SUCCESS; - -_return: - - SCH_JOB_ELOG("invalid job status update, from %s to %s", jobTaskStatusStr(oriStatus), jobTaskStatusStr(newStatus)); - SCH_ERR_RET(code); - return TSDB_CODE_SUCCESS; -} - -int32_t schBuildTaskRalation(SSchJob *pJob, SHashObj *planToTask) { - for (int32_t i = 0; i < pJob->levelNum; ++i) { - SSchLevel *pLevel = taosArrayGet(pJob->levels, i); - - for (int32_t m = 0; m < pLevel->taskNum; ++m) { - SSchTask *pTask = taosArrayGet(pLevel->subTasks, m); - SSubplan *pPlan = pTask->plan; - int32_t childNum = pPlan->pChildren ? (int32_t)LIST_LENGTH(pPlan->pChildren) : 0; - int32_t parentNum = pPlan->pParents ? 
(int32_t)LIST_LENGTH(pPlan->pParents) : 0; - - if (childNum > 0) { - if (pJob->levelIdx == pLevel->level) { - SCH_JOB_ELOG("invalid query plan, lowest level, childNum:%d", childNum); - SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); - } - - pTask->children = taosArrayInit(childNum, POINTER_BYTES); - if (NULL == pTask->children) { - SCH_TASK_ELOG("taosArrayInit %d children failed", childNum); - SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - } - - for (int32_t n = 0; n < childNum; ++n) { - SSubplan *child = (SSubplan *)nodesListGetNode(pPlan->pChildren, n); - SSchTask **childTask = taosHashGet(planToTask, &child, POINTER_BYTES); - if (NULL == childTask || NULL == *childTask) { - SCH_TASK_ELOG("subplan children relationship error, level:%d, taskIdx:%d, childIdx:%d", i, m, n); - SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); - } - - if (NULL == taosArrayPush(pTask->children, childTask)) { - SCH_TASK_ELOG("taosArrayPush childTask failed, level:%d, taskIdx:%d, childIdx:%d", i, m, n); - SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - } - - if (parentNum > 0) { - if (0 == pLevel->level) { - SCH_TASK_ELOG("invalid task info, level:0, parentNum:%d", parentNum); - SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); - } - - pTask->parents = taosArrayInit(parentNum, POINTER_BYTES); - if (NULL == pTask->parents) { - SCH_TASK_ELOG("taosArrayInit %d parents failed", parentNum); - SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - } else { - if (0 != pLevel->level) { - SCH_TASK_ELOG("invalid task info, level:%d, parentNum:%d", pLevel->level, parentNum); - SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); - } - } - - for (int32_t n = 0; n < parentNum; ++n) { - SSubplan *parent = (SSubplan *)nodesListGetNode(pPlan->pParents, n); - SSchTask **parentTask = taosHashGet(planToTask, &parent, POINTER_BYTES); - if (NULL == parentTask || NULL == *parentTask) { - SCH_TASK_ELOG("subplan parent relationship error, level:%d, taskIdx:%d, childIdx:%d", i, m, n); - SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); - } - - if (NULL == taosArrayPush(pTask->parents, parentTask)) { - SCH_TASK_ELOG("taosArrayPush parentTask failed, level:%d, taskIdx:%d, childIdx:%d", i, m, n); - SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - } - - SCH_TASK_DLOG("level:%d, parentNum:%d, childNum:%d", i, parentNum, childNum); - } - } - - SSchLevel *pLevel = taosArrayGet(pJob->levels, 0); - if (SCH_IS_QUERY_JOB(pJob) && pLevel->taskNum > 1) { - SCH_JOB_ELOG("invalid query plan, level:0, taskNum:%d", pLevel->taskNum); - SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); - } - - return TSDB_CODE_SUCCESS; -} - -int32_t schRecordTaskSucceedNode(SSchJob *pJob, SSchTask *pTask) { - SQueryNodeAddr *addr = taosArrayGet(pTask->candidateAddrs, pTask->candidateIdx); - if (NULL == addr) { - SCH_TASK_ELOG("taosArrayGet candidate addr failed, idx:%d, size:%d", pTask->candidateIdx, - (int32_t)taosArrayGetSize(pTask->candidateAddrs)); - SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); - } - - pTask->succeedAddr = *addr; - - return TSDB_CODE_SUCCESS; -} - -int32_t schRecordTaskExecNode(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr, void *handle) { - SSchNodeInfo nodeInfo = {.addr = *addr, .handle = handle}; - - if (NULL == taosArrayPush(pTask->execNodes, &nodeInfo)) { - SCH_TASK_ELOG("taosArrayPush nodeInfo to execNodes list failed, errno:%d", errno); - SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - SCH_TASK_DLOG("task execNode recorded, handle:%p", handle); - - return TSDB_CODE_SUCCESS; -} - -int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob) { - int32_t code = 0; - pJob->queryId = 
pDag->queryId; - - if (pDag->numOfSubplans <= 0) { - SCH_JOB_ELOG("invalid subplan num:%d", pDag->numOfSubplans); - SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); - } - - int32_t levelNum = (int32_t)LIST_LENGTH(pDag->pSubplans); - if (levelNum <= 0) { - SCH_JOB_ELOG("invalid level num:%d", levelNum); - SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); - } - - SHashObj *planToTask = taosHashInit( - SCHEDULE_DEFAULT_MAX_TASK_NUM, - taosGetDefaultHashFunction(POINTER_BYTES == sizeof(int64_t) ? TSDB_DATA_TYPE_BIGINT : TSDB_DATA_TYPE_INT), false, - HASH_NO_LOCK); - if (NULL == planToTask) { - SCH_JOB_ELOG("taosHashInit %d failed", SCHEDULE_DEFAULT_MAX_TASK_NUM); - SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - pJob->levels = taosArrayInit(levelNum, sizeof(SSchLevel)); - if (NULL == pJob->levels) { - SCH_JOB_ELOG("taosArrayInit %d failed", levelNum); - SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - pJob->levelNum = levelNum; - pJob->levelIdx = levelNum - 1; - - pJob->subPlans = pDag->pSubplans; - - SSchLevel level = {0}; - SNodeListNode *plans = NULL; - int32_t taskNum = 0; - SSchLevel *pLevel = NULL; - - level.status = JOB_TASK_STATUS_NOT_START; - - for (int32_t i = 0; i < levelNum; ++i) { - if (NULL == taosArrayPush(pJob->levels, &level)) { - SCH_JOB_ELOG("taosArrayPush level failed, level:%d", i); - SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - pLevel = taosArrayGet(pJob->levels, i); - pLevel->level = i; - - plans = (SNodeListNode *)nodesListGetNode(pDag->pSubplans, i); - if (NULL == plans) { - SCH_JOB_ELOG("empty level plan, level:%d", i); - SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); - } - - taskNum = (int32_t)LIST_LENGTH(plans->pNodeList); - if (taskNum <= 0) { - SCH_JOB_ELOG("invalid level plan number:%d, level:%d", taskNum, i); - SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); - } - - pLevel->taskNum = taskNum; - - pLevel->subTasks = taosArrayInit(taskNum, sizeof(SSchTask)); - if (NULL == pLevel->subTasks) { - SCH_JOB_ELOG("taosArrayInit %d failed", taskNum); - SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - for (int32_t n = 0; n < taskNum; ++n) { - SSubplan *plan = (SSubplan *)nodesListGetNode(plans->pNodeList, n); - - SCH_SET_JOB_TYPE(pJob, plan->subplanType); - - SSchTask task = {0}; - SSchTask *pTask = &task; - - SCH_ERR_JRET(schInitTask(pJob, &task, plan, pLevel)); - - void *p = taosArrayPush(pLevel->subTasks, &task); - if (NULL == p) { - SCH_TASK_ELOG("taosArrayPush task to level failed, level:%d, taskIdx:%d", pLevel->level, n); - SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - if (0 != taosHashPut(planToTask, &plan, POINTER_BYTES, &p, POINTER_BYTES)) { - SCH_TASK_ELOG("taosHashPut to planToTaks failed, taskIdx:%d", n); - SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - ++pJob->taskNum; - } - - SCH_JOB_DLOG("level initialized, taskNum:%d", taskNum); - } - - SCH_ERR_JRET(schBuildTaskRalation(pJob, planToTask)); - -_return: - if (planToTask) { - taosHashCleanup(planToTask); - } - - SCH_RET(code); -} - -int32_t schSetTaskCandidateAddrs(SSchJob *pJob, SSchTask *pTask) { - if (NULL != pTask->candidateAddrs) { - return TSDB_CODE_SUCCESS; - } - - pTask->candidateIdx = 0; - pTask->candidateAddrs = taosArrayInit(SCH_MAX_CANDIDATE_EP_NUM, sizeof(SQueryNodeAddr)); - if (NULL == pTask->candidateAddrs) { - SCH_TASK_ELOG("taosArrayInit %d condidate addrs failed", SCH_MAX_CANDIDATE_EP_NUM); - SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - if (pTask->plan->execNode.epSet.numOfEps > 0) { - if (NULL == taosArrayPush(pTask->candidateAddrs, &pTask->plan->execNode)) { - SCH_TASK_ELOG("taosArrayPush 
execNode to candidate addrs failed, errno:%d", errno); - SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - SCH_TASK_DLOG("use execNode from plan as candidate addr, numOfEps:%d", pTask->plan->execNode.epSet.numOfEps); - - return TSDB_CODE_SUCCESS; - } - - int32_t addNum = 0; - int32_t nodeNum = 0; - if (pJob->nodeList) { - nodeNum = taosArrayGetSize(pJob->nodeList); - - for (int32_t i = 0; i < nodeNum && addNum < SCH_MAX_CANDIDATE_EP_NUM; ++i) { - SQueryNodeAddr *naddr = taosArrayGet(pJob->nodeList, i); - - if (NULL == taosArrayPush(pTask->candidateAddrs, naddr)) { - SCH_TASK_ELOG("taosArrayPush execNode to candidate addrs failed, addNum:%d, errno:%d", addNum, errno); - SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - ++addNum; - } - } - - if (addNum <= 0) { - SCH_TASK_ELOG("no available execNode as candidates, nodeNum:%d", nodeNum); - SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); - } - - /* - for (int32_t i = 0; i < job->dataSrcEps.numOfEps && addNum < SCH_MAX_CANDIDATE_EP_NUM; ++i) { - strncpy(epSet->fqdn[epSet->numOfEps], job->dataSrcEps.fqdn[i], sizeof(job->dataSrcEps.fqdn[i])); - epSet->port[epSet->numOfEps] = job->dataSrcEps.port[i]; - - ++epSet->numOfEps; - } - */ - - return TSDB_CODE_SUCCESS; -} - -int32_t schPushTaskToExecList(SSchJob *pJob, SSchTask *pTask) { - int32_t code = taosHashPut(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId), &pTask, POINTER_BYTES); - if (0 != code) { - if (HASH_NODE_EXIST(code)) { - SCH_TASK_ELOG("task already in execTask list, code:%x", code); - SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); - } - - SCH_TASK_ELOG("taosHashPut task to execTask list failed, errno:%d", errno); - SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - SCH_TASK_DLOG("task added to execTask list, numOfTasks:%d", taosHashGetSize(pJob->execTasks)); - - return TSDB_CODE_SUCCESS; -} - -int32_t schMoveTaskToSuccList(SSchJob *pJob, SSchTask *pTask, bool *moved) { - if (0 != taosHashRemove(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId))) { - SCH_TASK_WLOG("remove task from execTask list failed, may not exist, status:%s", SCH_GET_TASK_STATUS_STR(pTask)); - } else { - SCH_TASK_DLOG("task removed from execTask list, numOfTasks:%d", taosHashGetSize(pJob->execTasks)); - } - - int32_t code = taosHashPut(pJob->succTasks, &pTask->taskId, sizeof(pTask->taskId), &pTask, POINTER_BYTES); - if (0 != code) { - if (HASH_NODE_EXIST(code)) { - *moved = true; - SCH_TASK_ELOG("task already in succTask list, status:%s", SCH_GET_TASK_STATUS_STR(pTask)); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); - } - - SCH_TASK_ELOG("taosHashPut task to succTask list failed, errno:%d", errno); - SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - *moved = true; - - SCH_TASK_DLOG("task moved to succTask list, numOfTasks:%d", taosHashGetSize(pJob->succTasks)); - - return TSDB_CODE_SUCCESS; -} - -int32_t schMoveTaskToFailList(SSchJob *pJob, SSchTask *pTask, bool *moved) { - *moved = false; - - if (0 != taosHashRemove(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId))) { - SCH_TASK_WLOG("remove task from execTask list failed, may not exist, status:%s", SCH_GET_TASK_STATUS_STR(pTask)); - } - - int32_t code = taosHashPut(pJob->failTasks, &pTask->taskId, sizeof(pTask->taskId), &pTask, POINTER_BYTES); - if (0 != code) { - if (HASH_NODE_EXIST(code)) { - *moved = true; - - SCH_TASK_WLOG("task already in failTask list, status:%s", SCH_GET_TASK_STATUS_STR(pTask)); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); - } - - SCH_TASK_ELOG("taosHashPut task to failTask list failed, errno:%d", errno); - 
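/*
 * Hedged sketch of the bookkeeping contract implemented by the
 * schMoveTaskToSuccList/schMoveTaskToFailList helpers above: removal from
 * the source list may fail silently (the task may never have been there),
 * and a duplicate in the target list still reports the task as moved. The
 * toy TaskList below only models that behavior; it is not the taosHash API.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct { int ids[8]; int n; } TaskList;

static bool listRemove(TaskList *l, int id) {
  for (int i = 0; i < l->n; ++i) {
    if (l->ids[i] == id) { l->ids[i] = l->ids[--l->n]; return true; }
  }
  return false;
}

static bool listPut(TaskList *l, int id) {
  for (int i = 0; i < l->n; ++i) {
    if (l->ids[i] == id) return false;  /* duplicate, like HASH_NODE_EXIST */
  }
  l->ids[l->n++] = id;
  return true;
}

static bool moveTask(TaskList *from, TaskList *to, int id) {
  if (!listRemove(from, id)) printf("task %d not in source list, continuing\n", id);
  if (!listPut(to, id))      printf("task %d already in target list\n", id);
  return true;  /* the diff sets *moved = true in both cases */
}

int main(void) {
  TaskList execList = {{7}, 1}, failList = {{0}, 0};
  moveTask(&execList, &failList, 7);  /* normal move */
  moveTask(&execList, &failList, 7);  /* repeated move still counts as moved */
  return 0;
}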
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - *moved = true; - - SCH_TASK_DLOG("task moved to failTask list, numOfTasks:%d", taosHashGetSize(pJob->failTasks)); - - return TSDB_CODE_SUCCESS; -} - -int32_t schMoveTaskToExecList(SSchJob *pJob, SSchTask *pTask, bool *moved) { - if (0 != taosHashRemove(pJob->succTasks, &pTask->taskId, sizeof(pTask->taskId))) { - SCH_TASK_WLOG("remove task from succTask list failed, may not exist, status:%s", SCH_GET_TASK_STATUS_STR(pTask)); - } - - int32_t code = taosHashPut(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId), &pTask, POINTER_BYTES); - if (0 != code) { - if (HASH_NODE_EXIST(code)) { - *moved = true; - - SCH_TASK_ELOG("task already in execTask list, status:%s", SCH_GET_TASK_STATUS_STR(pTask)); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); - } - - SCH_TASK_ELOG("taosHashPut task to execTask list failed, errno:%d", errno); - SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - *moved = true; - - SCH_TASK_DLOG("task moved to execTask list, numOfTasks:%d", taosHashGetSize(pJob->execTasks)); - - return TSDB_CODE_SUCCESS; -} - -int32_t schTaskCheckSetRetry(SSchJob *pJob, SSchTask *pTask, int32_t errCode, bool *needRetry) { - int8_t status = 0; - ++pTask->tryTimes; - - if (schJobNeedToStop(pJob, &status)) { - *needRetry = false; - SCH_TASK_DLOG("task no more retry cause of job status, job status:%s", jobTaskStatusStr(status)); - return TSDB_CODE_SUCCESS; - } - - if (pTask->tryTimes >= REQUEST_MAX_TRY_TIMES) { - *needRetry = false; - SCH_TASK_DLOG("task no more retry since reach max try times, tryTimes:%d", pTask->tryTimes); - return TSDB_CODE_SUCCESS; - } - - if (!NEED_SCHEDULER_RETRY_ERROR(errCode)) { - *needRetry = false; - SCH_TASK_DLOG("task no more retry cause of errCode, errCode:%x - %s", errCode, tstrerror(errCode)); - return TSDB_CODE_SUCCESS; - } - - // TODO CHECK epList/condidateList - if (SCH_IS_DATA_SRC_TASK(pTask)) { - if (pTask->tryTimes >= SCH_TASK_NUM_OF_EPS(&pTask->plan->execNode)) { - *needRetry = false; - SCH_TASK_DLOG("task no more retry since all ep tried, tryTimes:%d, epNum:%d", pTask->tryTimes, - SCH_TASK_NUM_OF_EPS(&pTask->plan->execNode)); - return TSDB_CODE_SUCCESS; - } - } else { - int32_t candidateNum = taosArrayGetSize(pTask->candidateAddrs); - - if ((pTask->candidateIdx + 1) >= candidateNum) { - *needRetry = false; - SCH_TASK_DLOG("task no more retry since all candiates tried, candidateIdx:%d, candidateNum:%d", - pTask->candidateIdx, candidateNum); - return TSDB_CODE_SUCCESS; - } - } - - *needRetry = true; - SCH_TASK_DLOG("task need the %dth retry, errCode:%x - %s", pTask->tryTimes, errCode, tstrerror(errCode)); - - return TSDB_CODE_SUCCESS; -} - -int32_t schHandleTaskRetry(SSchJob *pJob, SSchTask *pTask) { - atomic_sub_fetch_32(&pTask->level->taskLaunchedNum, 1); - - if (SCH_TASK_NEED_FLOW_CTRL(pJob, pTask)) { - SCH_ERR_RET(schDecTaskFlowQuota(pJob, pTask)); - SCH_ERR_RET(schLaunchTasksInFlowCtrlList(pJob, pTask)); - } - - if (SCH_IS_DATA_SRC_TASK(pTask)) { - SCH_SWITCH_EPSET(&pTask->plan->execNode); - } else { - ++pTask->candidateIdx; - } - - SCH_ERR_RET(schLaunchTask(pJob, pTask)); - - return TSDB_CODE_SUCCESS; -} - -int32_t schUpdateHbConnection(SQueryNodeEpId *epId, SSchTrans *trans) { - int32_t code = 0; - SSchHbTrans *hb = NULL; - - hb = taosHashGet(schMgmt.hbConnections, epId, sizeof(SQueryNodeEpId)); - if (NULL == hb) { - qError("taosHashGet hb connection failed, nodeId:%d, fqdn:%s, port:%d", epId->nodeId, epId->ep.fqdn, epId->ep.port); - SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR); - } - - SCH_LOCK(SCH_WRITE, 
&hb->lock); - memcpy(&hb->trans, trans, sizeof(*trans)); - SCH_UNLOCK(SCH_WRITE, &hb->lock); - - qDebug("hb connection updated, sId:%" PRIx64 ", nodeId:%d, fqdn:%s, port:%d, instance:%p, handle:%p", schMgmt.sId, - epId->nodeId, epId->ep.fqdn, epId->ep.port, trans->transInst, trans->transHandle); - - return TSDB_CODE_SUCCESS; -} - -void schUpdateJobErrCode(SSchJob *pJob, int32_t errCode) { - if (TSDB_CODE_SUCCESS == errCode) { - return; - } - - int32_t origCode = atomic_load_32(&pJob->errCode); - if (TSDB_CODE_SUCCESS == origCode) { - if (origCode == atomic_val_compare_exchange_32(&pJob->errCode, origCode, errCode)) { - goto _return; - } - - origCode = atomic_load_32(&pJob->errCode); - } - - if (NEED_CLIENT_HANDLE_ERROR(origCode)) { - return; - } - - if (NEED_CLIENT_HANDLE_ERROR(errCode)) { - atomic_store_32(&pJob->errCode, errCode); - goto _return; - } - - return; - -_return: - - SCH_JOB_DLOG("job errCode updated to %x - %s", errCode, tstrerror(errCode)); -} - -int32_t schProcessOnJobFailureImpl(SSchJob *pJob, int32_t status, int32_t errCode) { - // if already FAILED, no more processing - SCH_ERR_RET(schCheckAndUpdateJobStatus(pJob, status)); - - schUpdateJobErrCode(pJob, errCode); - - if (atomic_load_8(&pJob->userFetch) || pJob->attr.syncSchedule) { - tsem_post(&pJob->rspSem); - } - - int32_t code = atomic_load_32(&pJob->errCode); - - SCH_JOB_DLOG("job failed with error: %s", tstrerror(code)); - - SCH_RET(code); -} - -// Note: no more task error processing, handled in function internal -int32_t schProcessOnJobFailure(SSchJob *pJob, int32_t errCode) { - SCH_RET(schProcessOnJobFailureImpl(pJob, JOB_TASK_STATUS_FAILED, errCode)); -} - -// Note: no more error processing, handled in function internal -int32_t schProcessOnJobDropped(SSchJob *pJob, int32_t errCode) { - SCH_RET(schProcessOnJobFailureImpl(pJob, JOB_TASK_STATUS_DROPPING, errCode)); -} - -// Note: no more task error processing, handled in function internal -int32_t schProcessOnJobPartialSuccess(SSchJob *pJob) { - int32_t code = 0; - - SCH_ERR_RET(schCheckAndUpdateJobStatus(pJob, JOB_TASK_STATUS_PARTIAL_SUCCEED)); - - if (pJob->attr.syncSchedule) { - tsem_post(&pJob->rspSem); - } - - if (atomic_load_8(&pJob->userFetch)) { - SCH_ERR_JRET(schFetchFromRemote(pJob)); - } - - return TSDB_CODE_SUCCESS; - -_return: - - SCH_RET(schProcessOnJobFailure(pJob, code)); -} - -void schProcessOnDataFetched(SSchJob *job) { - atomic_val_compare_exchange_32(&job->remoteFetch, 1, 0); - tsem_post(&job->rspSem); -} - -// Note: no more task error processing, handled in function internal -int32_t schProcessOnTaskFailure(SSchJob *pJob, SSchTask *pTask, int32_t errCode) { - int8_t status = 0; - - if (schJobNeedToStop(pJob, &status)) { - SCH_TASK_DLOG("task failed not processed cause of job status, job status:%s", jobTaskStatusStr(status)); - SCH_RET(atomic_load_32(&pJob->errCode)); - } - - bool needRetry = false; - bool moved = false; - int32_t taskDone = 0; - int32_t code = 0; - - SCH_TASK_DLOG("taskOnFailure, code:%s", tstrerror(errCode)); - - SCH_ERR_JRET(schTaskCheckSetRetry(pJob, pTask, errCode, &needRetry)); - - if (!needRetry) { - SCH_TASK_ELOG("task failed and no more retry, code:%s", tstrerror(errCode)); - - if (SCH_GET_TASK_STATUS(pTask) == JOB_TASK_STATUS_EXECUTING) { - SCH_ERR_JRET(schMoveTaskToFailList(pJob, pTask, &moved)); - } else { - SCH_TASK_ELOG("task not in executing list, status:%s", SCH_GET_TASK_STATUS_STR(pTask)); - SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR); - } - - SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_FAILED); - - if 
(SCH_IS_WAIT_ALL_JOB(pJob)) { - SCH_LOCK(SCH_WRITE, &pTask->level->lock); - pTask->level->taskFailed++; - taskDone = pTask->level->taskSucceed + pTask->level->taskFailed; - SCH_UNLOCK(SCH_WRITE, &pTask->level->lock); - - schUpdateJobErrCode(pJob, errCode); - - if (taskDone < pTask->level->taskNum) { - SCH_TASK_DLOG("need to wait other tasks, doneNum:%d, allNum:%d", taskDone, pTask->level->taskNum); - SCH_RET(errCode); - } - } - } else { - SCH_ERR_JRET(schHandleTaskRetry(pJob, pTask)); - - return TSDB_CODE_SUCCESS; - } - -_return: - - SCH_RET(schProcessOnJobFailure(pJob, errCode)); -} - -// Note: no more task error processing, handled in function internal -int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask) { - bool moved = false; - int32_t code = 0; - - SCH_TASK_DLOG("taskOnSuccess, status:%s", SCH_GET_TASK_STATUS_STR(pTask)); - - SCH_ERR_JRET(schMoveTaskToSuccList(pJob, pTask, &moved)); - - SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_PARTIAL_SUCCEED); - - SCH_ERR_JRET(schRecordTaskSucceedNode(pJob, pTask)); - - SCH_ERR_JRET(schLaunchTasksInFlowCtrlList(pJob, pTask)); - - int32_t parentNum = pTask->parents ? (int32_t)taosArrayGetSize(pTask->parents) : 0; - if (parentNum == 0) { - int32_t taskDone = 0; - if (SCH_IS_WAIT_ALL_JOB(pJob)) { - SCH_LOCK(SCH_WRITE, &pTask->level->lock); - pTask->level->taskSucceed++; - taskDone = pTask->level->taskSucceed + pTask->level->taskFailed; - SCH_UNLOCK(SCH_WRITE, &pTask->level->lock); - - if (taskDone < pTask->level->taskNum) { - SCH_TASK_DLOG("wait all tasks, done:%d, all:%d", taskDone, pTask->level->taskNum); - return TSDB_CODE_SUCCESS; - } else if (taskDone > pTask->level->taskNum) { - SCH_TASK_ELOG("taskDone number invalid, done:%d, total:%d", taskDone, pTask->level->taskNum); - } - - if (pTask->level->taskFailed > 0) { - SCH_RET(schProcessOnJobFailure(pJob, 0)); - } else { - SCH_RET(schProcessOnJobPartialSuccess(pJob)); - } - } else { - pJob->resNode = pTask->succeedAddr; - } - - pJob->fetchTask = pTask; - - SCH_ERR_JRET(schMoveTaskToExecList(pJob, pTask, &moved)); - - SCH_RET(schProcessOnJobPartialSuccess(pJob)); - } - - /* - if (SCH_IS_DATA_SRC_TASK(task) && job->dataSrcEps.numOfEps < SCH_MAX_CANDIDATE_EP_NUM) { - strncpy(job->dataSrcEps.fqdn[job->dataSrcEps.numOfEps], task->execAddr.fqdn, sizeof(task->execAddr.fqdn)); - job->dataSrcEps.port[job->dataSrcEps.numOfEps] = task->execAddr.port; - - ++job->dataSrcEps.numOfEps; - } - */ - - for (int32_t i = 0; i < parentNum; ++i) { - SSchTask *par = *(SSchTask **)taosArrayGet(pTask->parents, i); - int32_t readyNum = atomic_add_fetch_32(&par->childReady, 1); - - SCH_LOCK(SCH_WRITE, &par->lock); - SDownstreamSourceNode source = {.type = QUERY_NODE_DOWNSTREAM_SOURCE, - .taskId = pTask->taskId, - .schedId = schMgmt.sId, - .addr = pTask->succeedAddr}; - qSetSubplanExecutionNode(par->plan, pTask->plan->id.groupId, &source); - SCH_UNLOCK(SCH_WRITE, &par->lock); - - if (SCH_TASK_READY_TO_LUNCH(readyNum, par)) { - SCH_ERR_RET(schLaunchTaskImpl(pJob, par)); - } - } - - return TSDB_CODE_SUCCESS; - -_return: - - SCH_RET(schProcessOnJobFailure(pJob, code)); -} - -// Note: no more error processing, handled in function internal -int32_t schFetchFromRemote(SSchJob *pJob) { - int32_t code = 0; - - if (atomic_val_compare_exchange_32(&pJob->remoteFetch, 0, 1) != 0) { - SCH_JOB_ELOG("prior fetching not finished, remoteFetch:%d", atomic_load_32(&pJob->remoteFetch)); - return TSDB_CODE_SUCCESS; - } - - void *resData = atomic_load_ptr(&pJob->resData); - if (resData) { - 
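/*
 * The atomic_val_compare_exchange_32 guard at the top of schFetchFromRemote
 * above is a single-flight gate: only the caller that flips remoteFetch from
 * 0 to 1 issues the fetch; everyone else returns immediately. A self-contained
 * C11 sketch of the same idea (the names here are ours, not TDengine's):
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int remoteFetch;  /* 0 = idle, 1 = a fetch is in flight */

static int tryBeginFetch(void) {
  int expected = 0;
  /* mirrors: atomic_val_compare_exchange_32(&pJob->remoteFetch, 0, 1) */
  return atomic_compare_exchange_strong(&remoteFetch, &expected, 1);
}

static void endFetch(void) {
  int expected = 1;
  /* mirrors: atomic_val_compare_exchange_32(&pJob->remoteFetch, 1, 0) */
  atomic_compare_exchange_strong(&remoteFetch, &expected, 0);
}

int main(void) {
  printf("first caller proceeds: %d\n", tryBeginFetch());  /* 1 */
  printf("second caller skips:   %d\n", tryBeginFetch());  /* 0 */
  endFetch();
  return 0;
}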
atomic_val_compare_exchange_32(&pJob->remoteFetch, 1, 0); - - SCH_JOB_DLOG("res already fetched, res:%p", resData); - return TSDB_CODE_SUCCESS; - } - - SCH_ERR_JRET(schBuildAndSendMsg(pJob, pJob->fetchTask, &pJob->resNode, TDMT_VND_FETCH)); - - return TSDB_CODE_SUCCESS; - -_return: - - atomic_val_compare_exchange_32(&pJob->remoteFetch, 1, 0); - - SCH_RET(schProcessOnTaskFailure(pJob, pJob->fetchTask, code)); -} - -int32_t schProcessOnExplainDone(SSchJob *pJob, SSchTask *pTask, SRetrieveTableRsp *pRsp) { - SCH_TASK_DLOG("got explain rsp, rows:%d, complete:%d", htonl(pRsp->numOfRows), pRsp->completed); - - atomic_store_32(&pJob->resNumOfRows, htonl(pRsp->numOfRows)); - atomic_store_ptr(&pJob->resData, pRsp); - - SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_SUCCEED); - - schProcessOnDataFetched(pJob); - - return TSDB_CODE_SUCCESS; -} - -int32_t schSaveJobQueryRes(SSchJob *pJob, SResReadyRsp *rsp) { - if (rsp->tbFName[0]) { - if (NULL == pJob->queryRes) { - pJob->queryRes = taosArrayInit(pJob->taskNum, sizeof(STbVerInfo)); - if (NULL == pJob->queryRes) { - SCH_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); - } - } - - STbVerInfo tbInfo; - strcpy(tbInfo.tbFName, rsp->tbFName); - tbInfo.sversion = rsp->sversion; - tbInfo.tversion = rsp->tversion; - - taosArrayPush((SArray *)pJob->queryRes, &tbInfo); - } - - return TSDB_CODE_SUCCESS; -} - - -// Note: no more task error processing, handled in function internal -int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, char *msg, int32_t msgSize, - int32_t rspCode) { - int32_t code = 0; - int8_t status = 0; - - if (schJobNeedToStop(pJob, &status)) { - SCH_TASK_ELOG("rsp not processed cause of job status, job status:%s, rspCode:0x%x", jobTaskStatusStr(status), - rspCode); - SCH_RET(atomic_load_32(&pJob->errCode)); - } - - SCH_ERR_JRET(schValidateTaskReceivedMsgType(pJob, pTask, msgType)); - - switch (msgType) { - case TDMT_VND_CREATE_TABLE_RSP: { - SVCreateTbBatchRsp batchRsp = {0}; - if (msg) { - SDecoder coder = {0}; - tDecoderInit(&coder, msg, msgSize); - code = tDecodeSVCreateTbBatchRsp(&coder, &batchRsp); - if (TSDB_CODE_SUCCESS == code && batchRsp.nRsps > 0) { - for (int32_t i = 0; i < batchRsp.nRsps; ++i) { - SVCreateTbRsp *rsp = batchRsp.pRsps + i; - if (TSDB_CODE_SUCCESS != rsp->code) { - code = rsp->code; - tDecoderClear(&coder); - SCH_ERR_JRET(code); - } - } - } - tDecoderClear(&coder); - SCH_ERR_JRET(code); - } - - SCH_ERR_JRET(rspCode); - SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); - break; - } - case TDMT_VND_DROP_TABLE_RSP: { - SVDropTbBatchRsp batchRsp = {0}; - if (msg) { - SDecoder coder = {0}; - tDecoderInit(&coder, msg, msgSize); - code = tDecodeSVDropTbBatchRsp(&coder, &batchRsp); - if (TSDB_CODE_SUCCESS == code && batchRsp.nRsps > 0) { - for (int32_t i = 0; i < batchRsp.nRsps; ++i) { - SVDropTbRsp *rsp = batchRsp.pRsps + i; - if (TSDB_CODE_SUCCESS != rsp->code) { - code = rsp->code; - tDecoderClear(&coder); - SCH_ERR_JRET(code); - } - } - } - tDecoderClear(&coder); - SCH_ERR_JRET(code); - } - - SCH_ERR_JRET(rspCode); - SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); - break; - } - case TDMT_VND_ALTER_TABLE_RSP: { - SVAlterTbRsp rsp = {0}; - if (msg) { - SDecoder coder = {0}; - tDecoderInit(&coder, msg, msgSize); - code = tDecodeSVAlterTbRsp(&coder, &rsp); - tDecoderClear(&coder); - SCH_ERR_JRET(code); - SCH_ERR_JRET(rsp.code); - } - - SCH_ERR_JRET(rspCode); - - if (NULL == msg) { - SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); - } - SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); - break; - } - case 
TDMT_VND_SUBMIT_RSP: { - SCH_ERR_JRET(rspCode); - - if (msg) { - SDecoder coder = {0}; - SSubmitRsp *rsp = taosMemoryMalloc(sizeof(*rsp)); - tDecoderInit(&coder, msg, msgSize); - code = tDecodeSSubmitRsp(&coder, rsp); - if (code) { - SCH_TASK_ELOG("decode submitRsp failed, code:%d", code); - tFreeSSubmitRsp(rsp); - SCH_ERR_JRET(code); - } - - if (rsp->nBlocks > 0) { - for (int32_t i = 0; i < rsp->nBlocks; ++i) { - SSubmitBlkRsp *blk = rsp->pBlocks + i; - if (TSDB_CODE_SUCCESS != blk->code) { - code = blk->code; - tFreeSSubmitRsp(rsp); - SCH_ERR_JRET(code); - } - } - } - - atomic_add_fetch_32(&pJob->resNumOfRows, rsp->affectedRows); - SCH_TASK_DLOG("submit succeed, affectedRows:%d", rsp->affectedRows); - - SCH_LOCK(SCH_WRITE, &pJob->resLock); - if (pJob->queryRes) { - SSubmitRsp *sum = pJob->queryRes; - sum->affectedRows += rsp->affectedRows; - sum->nBlocks += rsp->nBlocks; - sum->pBlocks = taosMemoryRealloc(sum->pBlocks, sum->nBlocks * sizeof(*sum->pBlocks)); - memcpy(sum->pBlocks + sum->nBlocks - rsp->nBlocks, rsp->pBlocks, rsp->nBlocks * sizeof(*sum->pBlocks)); - taosMemoryFree(rsp->pBlocks); - taosMemoryFree(rsp); - } else { - pJob->queryRes = rsp; - } - SCH_UNLOCK(SCH_WRITE, &pJob->resLock); - } - - SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); - - break; - } - case TDMT_VND_QUERY_RSP: { - SQueryTableRsp rsp = {0}; - if (msg) { - SCH_ERR_JRET(tDeserializeSQueryTableRsp(msg, msgSize, &rsp)); - SCH_ERR_JRET(rsp.code); - } - - SCH_ERR_JRET(rspCode); - - if (NULL == msg) { - SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); - } - - // SCH_ERR_JRET(schBuildAndSendMsg(pJob, pTask, NULL, TDMT_VND_RES_READY)); - - break; - } - case TDMT_VND_RES_READY_RSP: { - SResReadyRsp *rsp = (SResReadyRsp *)msg; - - SCH_ERR_JRET(rspCode); - if (NULL == msg) { - SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); - } - SCH_ERR_JRET(rsp->code); - - SCH_ERR_JRET(schSaveJobQueryRes(pJob, rsp)); - - SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); - - break; - } - case TDMT_VND_EXPLAIN_RSP: { - SCH_ERR_JRET(rspCode); - if (NULL == msg) { - SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); - } - - if (!SCH_IS_EXPLAIN_JOB(pJob)) { - SCH_TASK_ELOG("invalid msg received for none explain query, msg type:%s", TMSG_INFO(msgType)); - SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); - } - - if (pJob->resData) { - SCH_TASK_ELOG("explain result is already generated, res:%p", pJob->resData); - SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR); - } - - SExplainRsp rsp = {0}; - if (tDeserializeSExplainRsp(msg, msgSize, &rsp)) { - taosMemoryFree(rsp.subplanInfo); - SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - SRetrieveTableRsp *pRsp = NULL; - SCH_ERR_JRET(qExplainUpdateExecInfo(pJob->explainCtx, &rsp, pTask->plan->id.groupId, &pRsp)); - - if (pRsp) { - SCH_ERR_JRET(schProcessOnExplainDone(pJob, pTask, pRsp)); - } - break; - } - case TDMT_VND_FETCH_RSP: { - SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)msg; - - SCH_ERR_JRET(rspCode); - if (NULL == msg) { - SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); - } - - if (SCH_IS_EXPLAIN_JOB(pJob)) { - if (rsp->completed) { - SRetrieveTableRsp *pRsp = NULL; - SCH_ERR_JRET(qExecExplainEnd(pJob->explainCtx, &pRsp)); - if (pRsp) { - SCH_ERR_JRET(schProcessOnExplainDone(pJob, pTask, pRsp)); - } - - return TSDB_CODE_SUCCESS; - } - - atomic_val_compare_exchange_32(&pJob->remoteFetch, 1, 0); - - SCH_ERR_JRET(schFetchFromRemote(pJob)); - - return TSDB_CODE_SUCCESS; - } - - if (pJob->resData) { - SCH_TASK_ELOG("got fetch rsp while res already exists, res:%p", pJob->resData); - taosMemoryFreeClear(rsp); - 
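/*
 * The TDMT_VND_SUBMIT_RSP branch above merges each per-vnode SSubmitRsp into
 * pJob->queryRes by growing the accumulated block array and copying the new
 * blocks into the tail. A standalone sketch of that append step with toy
 * types (Blk/Rsp are illustrative, not the real structs); unlike the diff,
 * it also checks the realloc result before committing the new length.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { int code; } Blk;                  /* stands in for SSubmitBlkRsp */
typedef struct { int nBlocks; Blk *pBlocks; } Rsp; /* stands in for SSubmitRsp    */

static int appendBlocks(Rsp *sum, const Rsp *rsp) {
  int oldN = sum->nBlocks;
  Blk *p = realloc(sum->pBlocks, (size_t)(oldN + rsp->nBlocks) * sizeof(Blk));
  if (p == NULL) return -1;  /* keep the old array intact on failure */
  sum->pBlocks = p;
  sum->nBlocks = oldN + rsp->nBlocks;
  /* same target as sum->pBlocks + sum->nBlocks - rsp->nBlocks in the diff */
  memcpy(sum->pBlocks + oldN, rsp->pBlocks, (size_t)rsp->nBlocks * sizeof(Blk));
  return 0;
}

int main(void) {
  Blk in[2] = {{0}, {0}};
  Rsp sum = {0, NULL}, rsp = {2, in};
  if (appendBlocks(&sum, &rsp) == 0) printf("nBlocks=%d\n", sum.nBlocks);
  free(sum.pBlocks);
  return 0;
}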
SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR); - } - - atomic_store_ptr(&pJob->resData, rsp); - atomic_add_fetch_32(&pJob->resNumOfRows, htonl(rsp->numOfRows)); - - if (rsp->completed) { - SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_SUCCEED); - } - - SCH_TASK_DLOG("got fetch rsp, rows:%d, complete:%d", htonl(rsp->numOfRows), rsp->completed); - - schProcessOnDataFetched(pJob); - break; - } - case TDMT_VND_DROP_TASK_RSP: { - // SHOULD NEVER REACH HERE - SCH_TASK_ELOG("invalid status to handle drop task rsp, refId:%" PRIx64, pJob->refId); - SCH_ERR_JRET(TSDB_CODE_SCH_INTERNAL_ERROR); - break; - } - case TDMT_SCH_LINK_BROKEN: - SCH_TASK_ELOG("link broken received, error:%x - %s", rspCode, tstrerror(rspCode)); - SCH_ERR_JRET(rspCode); - break; - default: - SCH_TASK_ELOG("unknown rsp msg, type:%d, status:%s", msgType, SCH_GET_TASK_STATUS_STR(pTask)); - SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); - } - - return TSDB_CODE_SUCCESS; - -_return: - - SCH_RET(schProcessOnTaskFailure(pJob, pTask, code)); -} - -int32_t schGetTaskFromTaskList(SHashObj *pTaskList, uint64_t taskId, SSchTask **pTask) { - int32_t s = taosHashGetSize(pTaskList); - if (s <= 0) { - return TSDB_CODE_SUCCESS; - } - - SSchTask **task = taosHashGet(pTaskList, &taskId, sizeof(taskId)); - if (NULL == task || NULL == (*task)) { - return TSDB_CODE_SUCCESS; - } - - *pTask = *task; - - return TSDB_CODE_SUCCESS; -} - -int32_t schUpdateTaskExecNodeHandle(SSchTask *pTask, void *handle, int32_t rspCode) { - if (rspCode || NULL == pTask->execNodes || taosArrayGetSize(pTask->execNodes) > 1 || - taosArrayGetSize(pTask->execNodes) <= 0) { - return TSDB_CODE_SUCCESS; - } - - SSchNodeInfo *nodeInfo = taosArrayGet(pTask->execNodes, 0); - nodeInfo->handle = handle; - - return TSDB_CODE_SUCCESS; -} - -int32_t schHandleCallback(void *param, const SDataBuf *pMsg, int32_t msgType, int32_t rspCode) { - int32_t code = 0; - SSchTaskCallbackParam *pParam = (SSchTaskCallbackParam *)param; - SSchTask *pTask = NULL; - - SSchJob *pJob = schAcquireJob(pParam->refId); - if (NULL == pJob) { - qWarn("QID:0x%" PRIx64 ",TID:0x%" PRIx64 "taosAcquireRef job failed, may be dropped, refId:%" PRIx64, - pParam->queryId, pParam->taskId, pParam->refId); - SCH_ERR_JRET(TSDB_CODE_QRY_JOB_FREED); - } - - schGetTaskFromTaskList(pJob->execTasks, pParam->taskId, &pTask); - if (NULL == pTask) { - if (TDMT_VND_EXPLAIN_RSP == msgType) { - schGetTaskFromTaskList(pJob->succTasks, pParam->taskId, &pTask); - } else { - SCH_JOB_ELOG("task not found in execTask list, refId:%" PRIx64 ", taskId:%" PRIx64, pParam->refId, - pParam->taskId); - SCH_ERR_JRET(TSDB_CODE_SCH_INTERNAL_ERROR); - } - } - - if (NULL == pTask) { - SCH_JOB_ELOG("task not found in execList & succList, refId:%" PRIx64 ", taskId:%" PRIx64, pParam->refId, - pParam->taskId); - SCH_ERR_JRET(TSDB_CODE_SCH_INTERNAL_ERROR); - } - - SCH_TASK_DLOG("rsp msg received, type:%s, handle:%p, code:%s", TMSG_INFO(msgType), pMsg->handle, tstrerror(rspCode)); - - SCH_SET_TASK_HANDLE(pTask, pMsg->handle); - schUpdateTaskExecNodeHandle(pTask, pMsg->handle, rspCode); - SCH_ERR_JRET(schHandleResponseMsg(pJob, pTask, msgType, pMsg->pData, pMsg->len, rspCode)); - -_return: - if (pJob) { - schReleaseJob(pParam->refId); - } - - taosMemoryFreeClear(param); - SCH_RET(code); -} - -int32_t schHandleSubmitCallback(void *param, const SDataBuf *pMsg, int32_t code) { - return schHandleCallback(param, pMsg, TDMT_VND_SUBMIT_RSP, code); -} - -int32_t schHandleCreateTableCallback(void *param, const SDataBuf *pMsg, int32_t code) { - return schHandleCallback(param, pMsg, 
TDMT_VND_CREATE_TABLE_RSP, code); -} - -int32_t schHandleDropTableCallback(void *param, const SDataBuf *pMsg, int32_t code) { - return schHandleCallback(param, pMsg, TDMT_VND_DROP_TABLE_RSP, code); -} - -int32_t schHandleAlterTableCallback(void *param, const SDataBuf *pMsg, int32_t code) { - return schHandleCallback(param, pMsg, TDMT_VND_ALTER_TABLE_RSP, code); -} - -int32_t schHandleQueryCallback(void *param, const SDataBuf *pMsg, int32_t code) { - return schHandleCallback(param, pMsg, TDMT_VND_QUERY_RSP, code); -} - -int32_t schHandleFetchCallback(void *param, const SDataBuf *pMsg, int32_t code) { - return schHandleCallback(param, pMsg, TDMT_VND_FETCH_RSP, code); -} - -int32_t schHandleReadyCallback(void *param, const SDataBuf *pMsg, int32_t code) { - return schHandleCallback(param, pMsg, TDMT_VND_RES_READY_RSP, code); -} - -int32_t schHandleExplainCallback(void *param, const SDataBuf *pMsg, int32_t code) { - return schHandleCallback(param, pMsg, TDMT_VND_EXPLAIN_RSP, code); -} - -int32_t schHandleDropCallback(void *param, const SDataBuf *pMsg, int32_t code) { - SSchTaskCallbackParam *pParam = (SSchTaskCallbackParam *)param; - qDebug("QID:%" PRIx64 ",TID:%" PRIx64 " drop task rsp received, code:%x", pParam->queryId, pParam->taskId, code); - return TSDB_CODE_SUCCESS; -} - -int32_t schHandleHbCallback(void *param, const SDataBuf *pMsg, int32_t code) { - SSchedulerHbRsp rsp = {0}; - SSchTaskCallbackParam *pParam = (SSchTaskCallbackParam *)param; - - if (code) { - qError("hb rsp error:%s", tstrerror(code)); - SCH_ERR_JRET(code); - } - - if (tDeserializeSSchedulerHbRsp(pMsg->pData, pMsg->len, &rsp)) { - qError("invalid hb rsp msg, size:%d", pMsg->len); - SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); - } - - SSchTrans trans = {0}; - trans.transInst = pParam->transport; - trans.transHandle = pMsg->handle; - - SCH_ERR_JRET(schUpdateHbConnection(&rsp.epId, &trans)); - - int32_t taskNum = (int32_t)taosArrayGetSize(rsp.taskStatus); - qDebug("%d task status in hb rsp, nodeId:%d, fqdn:%s, port:%d", taskNum, rsp.epId.nodeId, rsp.epId.ep.fqdn, - rsp.epId.ep.port); - - for (int32_t i = 0; i < taskNum; ++i) { - STaskStatus *taskStatus = taosArrayGet(rsp.taskStatus, i); - - SSchJob *pJob = schAcquireJob(taskStatus->refId); - if (NULL == pJob) { - qWarn("job not found, refId:0x%" PRIx64 ",QID:0x%" PRIx64 ",TID:0x%" PRIx64, taskStatus->refId, - taskStatus->queryId, taskStatus->taskId); - // TODO DROP TASK FROM SERVER!!!! 
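/*
 * schHandleHbCallback above walks the heartbeat's task-status array and
 * acquires the owning job by refId for each entry; a missing job is skipped
 * (it may have been dropped meanwhile), and every successful acquire is
 * paired with a release. A toy refcount sketch of that discipline (not the
 * taosAcquireRef/taosReleaseRef API):
 */
#include <stdio.h>

typedef struct { int refs; int alive; } Job;  /* toy stand-in for a job ref */

static Job *acquireJob(Job *j) { return j->alive ? (++j->refs, j) : NULL; }
static void releaseJob(Job *j) { --j->refs; }

static void handleTaskStatuses(Job **jobs, int n) {
  for (int i = 0; i < n; ++i) {
    Job *j = acquireJob(jobs[i]);
    if (j == NULL) continue;   /* job already dropped: skip, as the diff does */
    /* ... inspect and log the reported task status here ... */
    releaseJob(j);             /* every acquired ref is released on all paths */
  }
}

int main(void) {
  Job live = {0, 1}, dropped = {0, 0};
  Job *fromHbRsp[] = {&live, &dropped};
  handleTaskStatuses(fromHbRsp, 2);
  printf("live.refs=%d (balanced)\n", live.refs);
  return 0;
}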
- continue; - } - - // TODO - - SCH_JOB_DLOG("TID:0x%" PRIx64 " task status in server: %s", taskStatus->taskId, - jobTaskStatusStr(taskStatus->status)); - - schReleaseJob(taskStatus->refId); - } - -_return: - - tFreeSSchedulerHbRsp(&rsp); - taosMemoryFree(param); - - SCH_RET(code); -} - -int32_t schHandleLinkBrokenCallback(void *param, const SDataBuf *pMsg, int32_t code) { - SSchCallbackParamHeader *head = (SSchCallbackParamHeader *)param; - rpcReleaseHandle(pMsg->handle, TAOS_CONN_CLIENT); - - qDebug("handle %p is broken", pMsg->handle); - - if (head->isHbParam) { - SSchHbCallbackParam *hbParam = (SSchHbCallbackParam *)param; - SSchTrans trans = {.transInst = hbParam->transport, .transHandle = NULL}; - SCH_ERR_RET(schUpdateHbConnection(&hbParam->nodeEpId, &trans)); - - SCH_ERR_RET(schBuildAndSendHbMsg(&hbParam->nodeEpId)); - } else { - SCH_ERR_RET(schHandleCallback(param, pMsg, TDMT_SCH_LINK_BROKEN, code)); - } - - return TSDB_CODE_SUCCESS; -} - -int32_t schGetCallbackFp(int32_t msgType, __async_send_cb_fn_t *fp) { - switch (msgType) { - case TDMT_VND_CREATE_TABLE: - *fp = schHandleCreateTableCallback; - break; - case TDMT_VND_DROP_TABLE: - *fp = schHandleDropTableCallback; - break; - case TDMT_VND_ALTER_TABLE: - *fp = schHandleAlterTableCallback; - break; - case TDMT_VND_SUBMIT: - *fp = schHandleSubmitCallback; - break; - case TDMT_VND_QUERY: - *fp = schHandleQueryCallback; - break; - case TDMT_VND_RES_READY: - *fp = schHandleReadyCallback; - break; - case TDMT_VND_EXPLAIN: - *fp = schHandleExplainCallback; - break; - case TDMT_VND_FETCH: - *fp = schHandleFetchCallback; - break; - case TDMT_VND_DROP_TASK: - *fp = schHandleDropCallback; - break; - case TDMT_VND_QUERY_HEARTBEAT: - *fp = schHandleHbCallback; - break; - case TDMT_SCH_LINK_BROKEN: - *fp = schHandleLinkBrokenCallback; - break; - default: - qError("unknown msg type for callback, msgType:%d", msgType); - SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR); - } - - return TSDB_CODE_SUCCESS; -} - -int32_t schGenerateTaskCallBackAHandle(SSchJob *pJob, SSchTask *pTask, int32_t msgType, SMsgSendInfo **pMsgSendInfo) { - int32_t code = 0; - SMsgSendInfo *msgSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo)); - if (NULL == msgSendInfo) { - SCH_TASK_ELOG("calloc %d failed", (int32_t)sizeof(SMsgSendInfo)); - SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - SSchTaskCallbackParam *param = taosMemoryCalloc(1, sizeof(SSchTaskCallbackParam)); - if (NULL == param) { - SCH_TASK_ELOG("calloc %d failed", (int32_t)sizeof(SSchTaskCallbackParam)); - SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - __async_send_cb_fn_t fp = NULL; - SCH_ERR_JRET(schGetCallbackFp(msgType, &fp)); - - param->queryId = pJob->queryId; - param->refId = pJob->refId; - param->taskId = SCH_TASK_ID(pTask); - param->transport = pJob->transport; - - msgSendInfo->param = param; - msgSendInfo->fp = fp; - - *pMsgSendInfo = msgSendInfo; - - return TSDB_CODE_SUCCESS; - -_return: - - taosMemoryFree(param); - taosMemoryFree(msgSendInfo); - - SCH_RET(code); -} - -void schFreeRpcCtxVal(const void *arg) { - if (NULL == arg) { - return; - } - - SMsgSendInfo *pMsgSendInfo = (SMsgSendInfo *)arg; - taosMemoryFreeClear(pMsgSendInfo->param); - taosMemoryFreeClear(pMsgSendInfo); -} - -int32_t schMakeTaskCallbackParam(SSchJob *pJob, SSchTask *pTask, void **pParam) { - SSchTaskCallbackParam *param = taosMemoryCalloc(1, sizeof(SSchTaskCallbackParam)); - if (NULL == param) { - SCH_TASK_ELOG("calloc %d failed", (int32_t)sizeof(SSchTaskCallbackParam)); - SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - 
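/*
 * schGetCallbackFp above maps each request msg type to its completion
 * callback with a switch; the same dispatch can be stated as a function-
 * pointer table, sketched here with made-up message types and handlers
 * (MSG_QUERY, onQueryRsp, etc. are illustrative, not TDengine symbols):
 */
#include <stdio.h>

typedef int (*send_cb_fn)(void *param, const void *msg, int code);

static int onQueryRsp(void *p, const void *m, int c) { (void)p; (void)m; return c; }
static int onFetchRsp(void *p, const void *m, int c) { (void)p; (void)m; return c; }

enum { MSG_QUERY, MSG_FETCH, MSG_MAX };

static send_cb_fn gCallbacks[MSG_MAX] = {
  [MSG_QUERY] = onQueryRsp,
  [MSG_FETCH] = onFetchRsp,
};

static int getCallbackFp(int msgType, send_cb_fn *fp) {
  if (msgType < 0 || msgType >= MSG_MAX || gCallbacks[msgType] == NULL) {
    return -1;  /* unknown msg type, like the switch's default branch */
  }
  *fp = gCallbacks[msgType];
  return 0;
}

int main(void) {
  send_cb_fn fp = NULL;
  printf("lookup ok: %d\n", getCallbackFp(MSG_FETCH, &fp) == 0 && fp != NULL);
  return 0;
}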
param->queryId = pJob->queryId; - param->refId = pJob->refId; - param->taskId = SCH_TASK_ID(pTask); - param->transport = pJob->transport; - - *pParam = param; - - return TSDB_CODE_SUCCESS; -} - -int32_t schMakeHbCallbackParam(SSchJob *pJob, SSchTask *pTask, void **pParam) { - SSchHbCallbackParam *param = taosMemoryCalloc(1, sizeof(SSchHbCallbackParam)); - if (NULL == param) { - SCH_TASK_ELOG("calloc %d failed", (int32_t)sizeof(SSchHbCallbackParam)); - SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - param->head.isHbParam = true; - - SQueryNodeAddr *addr = taosArrayGet(pTask->candidateAddrs, pTask->candidateIdx); - - param->nodeEpId.nodeId = addr->nodeId; - memcpy(¶m->nodeEpId.ep, SCH_GET_CUR_EP(addr), sizeof(SEp)); - param->transport = pJob->transport; - - *pParam = param; - - return TSDB_CODE_SUCCESS; -} - -int32_t schMakeBrokenLinkVal(SSchJob *pJob, SSchTask *pTask, SRpcBrokenlinkVal *brokenVal, bool isHb) { - int32_t code = 0; - SMsgSendInfo *pMsgSendInfo = NULL; - - pMsgSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo)); - if (NULL == pMsgSendInfo) { - SCH_TASK_ELOG("calloc %d failed", (int32_t)sizeof(SMsgSendInfo)); - SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - if (isHb) { - SCH_ERR_JRET(schMakeHbCallbackParam(pJob, pTask, &pMsgSendInfo->param)); - } else { - SCH_ERR_JRET(schMakeTaskCallbackParam(pJob, pTask, &pMsgSendInfo->param)); - } - - int32_t msgType = TDMT_SCH_LINK_BROKEN; - __async_send_cb_fn_t fp = NULL; - SCH_ERR_JRET(schGetCallbackFp(msgType, &fp)); - - pMsgSendInfo->fp = fp; - - brokenVal->msgType = msgType; - brokenVal->val = pMsgSendInfo; - brokenVal->clone = schCloneSMsgSendInfo; - brokenVal->freeFunc = schFreeRpcCtxVal; - - return TSDB_CODE_SUCCESS; - -_return: - - taosMemoryFreeClear(pMsgSendInfo->param); - taosMemoryFreeClear(pMsgSendInfo); - - SCH_RET(code); -} - -int32_t schMakeQueryRpcCtx(SSchJob *pJob, SSchTask *pTask, SRpcCtx *pCtx) { - int32_t code = 0; - SMsgSendInfo *pReadyMsgSendInfo = NULL; - SMsgSendInfo *pExplainMsgSendInfo = NULL; - - pCtx->args = taosHashInit(1, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_ENTRY_LOCK); - if (NULL == pCtx->args) { - SCH_TASK_ELOG("taosHashInit %d RpcCtx failed", 1); - SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - SCH_ERR_JRET(schGenerateTaskCallBackAHandle(pJob, pTask, TDMT_VND_RES_READY, &pReadyMsgSendInfo)); - SCH_ERR_JRET(schGenerateTaskCallBackAHandle(pJob, pTask, TDMT_VND_EXPLAIN, &pExplainMsgSendInfo)); - - int32_t msgType = TDMT_VND_RES_READY_RSP; - SRpcCtxVal ctxVal = {.val = pReadyMsgSendInfo, .clone = schCloneSMsgSendInfo, .freeFunc = schFreeRpcCtxVal}; - if (taosHashPut(pCtx->args, &msgType, sizeof(msgType), &ctxVal, sizeof(ctxVal))) { - SCH_TASK_ELOG("taosHashPut msg %d to rpcCtx failed", msgType); - SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - msgType = TDMT_VND_EXPLAIN_RSP; - ctxVal.val = pExplainMsgSendInfo; - if (taosHashPut(pCtx->args, &msgType, sizeof(msgType), &ctxVal, sizeof(ctxVal))) { - SCH_TASK_ELOG("taosHashPut msg %d to rpcCtx failed", msgType); - SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - SCH_ERR_JRET(schMakeBrokenLinkVal(pJob, pTask, &pCtx->brokenVal, false)); - - return TSDB_CODE_SUCCESS; - -_return: - - taosHashCleanup(pCtx->args); - - if (pReadyMsgSendInfo) { - taosMemoryFreeClear(pReadyMsgSendInfo->param); - taosMemoryFreeClear(pReadyMsgSendInfo); - } - - if (pExplainMsgSendInfo) { - taosMemoryFreeClear(pExplainMsgSendInfo->param); - taosMemoryFreeClear(pExplainMsgSendInfo); - } - - SCH_RET(code); -} - -int32_t schMakeHbRpcCtx(SSchJob *pJob, 
-int32_t schMakeHbRpcCtx(SSchJob *pJob, SSchTask *pTask, SRpcCtx *pCtx) {
-  int32_t code = 0;
-  SSchHbCallbackParam *param = NULL;
-  SMsgSendInfo *pMsgSendInfo = NULL;
-  SQueryNodeAddr *addr = taosArrayGet(pTask->candidateAddrs, pTask->candidateIdx);
-  SQueryNodeEpId epId = {0};
-
-  epId.nodeId = addr->nodeId;
-  memcpy(&epId.ep, SCH_GET_CUR_EP(addr), sizeof(SEp));
-
-  pCtx->args = taosHashInit(1, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_ENTRY_LOCK);
-  if (NULL == pCtx->args) {
-    SCH_TASK_ELOG("taosHashInit %d RpcCtx failed", 1);
-    SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-  }
-
-  pMsgSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
-  if (NULL == pMsgSendInfo) {
-    SCH_TASK_ELOG("calloc %d failed", (int32_t)sizeof(SMsgSendInfo));
-    SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-  }
-
-  param = taosMemoryCalloc(1, sizeof(SSchHbCallbackParam));
-  if (NULL == param) {
-    SCH_TASK_ELOG("calloc %d failed", (int32_t)sizeof(SSchHbCallbackParam));
-    SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-  }
-
-  int32_t msgType = TDMT_VND_QUERY_HEARTBEAT_RSP;
-  __async_send_cb_fn_t fp = NULL;
-  SCH_ERR_JRET(schGetCallbackFp(TDMT_VND_QUERY_HEARTBEAT, &fp));
-
-  param->nodeEpId = epId;
-  param->transport = pJob->transport;
-
-  pMsgSendInfo->param = param;
-  pMsgSendInfo->fp = fp;
-
-  SRpcCtxVal ctxVal = {.val = pMsgSendInfo, .clone = schCloneSMsgSendInfo, .freeFunc = schFreeRpcCtxVal};
-  if (taosHashPut(pCtx->args, &msgType, sizeof(msgType), &ctxVal, sizeof(ctxVal))) {
-    SCH_TASK_ELOG("taosHashPut msg %d to rpcCtx failed", msgType);
-    SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-  }
-
-  SCH_ERR_JRET(schMakeBrokenLinkVal(pJob, pTask, &pCtx->brokenVal, true));
-
-  return TSDB_CODE_SUCCESS;
-
-_return:
-
-  taosHashCleanup(pCtx->args);
-  taosMemoryFreeClear(param);
-  taosMemoryFreeClear(pMsgSendInfo);
-
-  SCH_RET(code);
-}
-
-int32_t schRegisterHbConnection(SSchJob *pJob, SSchTask *pTask, SQueryNodeEpId *epId, bool *exist) {
-  int32_t code = 0;
-  SSchHbTrans hb = {0};
-
-  hb.trans.transInst = pJob->transport;
-
-  SCH_ERR_RET(schMakeHbRpcCtx(pJob, pTask, &hb.rpcCtx));
-
-  code = taosHashPut(schMgmt.hbConnections, epId, sizeof(SQueryNodeEpId), &hb, sizeof(SSchHbTrans));
-  if (code) {
-    schFreeRpcCtx(&hb.rpcCtx);
-
-    if (HASH_NODE_EXIST(code)) {
-      *exist = true;
-      return TSDB_CODE_SUCCESS;
-    }
-
-    qError("taosHashPut hb trans failed, nodeId:%d, fqdn:%s, port:%d", epId->nodeId, epId->ep.fqdn, epId->ep.port);
-    SCH_ERR_RET(code);
-  }
-
-  return TSDB_CODE_SUCCESS;
-}
-
-int32_t schCloneCallbackParam(SSchCallbackParamHeader *pSrc, SSchCallbackParamHeader **pDst) {
-  if (pSrc->isHbParam) {
-    SSchHbCallbackParam *dst = taosMemoryMalloc(sizeof(SSchHbCallbackParam));
-    if (NULL == dst) {
-      qError("malloc SSchHbCallbackParam failed");
-      SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-    }
-
-    memcpy(dst, pSrc, sizeof(*dst));
-    *pDst = (SSchCallbackParamHeader *)dst;
-
-    return TSDB_CODE_SUCCESS;
-  }
-
-  SSchTaskCallbackParam *dst = taosMemoryMalloc(sizeof(SSchTaskCallbackParam));
-  if (NULL == dst) {
-    qError("malloc SSchTaskCallbackParam failed");
-    SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-  }
-
-  memcpy(dst, pSrc, sizeof(*dst));
-  *pDst = (SSchCallbackParamHeader *)dst;
-
-  return TSDB_CODE_SUCCESS;
-}
-
-int32_t schCloneSMsgSendInfo(void *src, void **dst) {
-  SMsgSendInfo *pSrc = src;
-  int32_t code = 0;
-  SMsgSendInfo *pDst = taosMemoryMalloc(sizeof(*pSrc));
-  if (NULL == pDst) {
-    qError("malloc SMsgSendInfo for rpcCtx failed, len:%d", (int32_t)sizeof(*pSrc));
-    SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-  }
-
-  memcpy(pDst, pSrc, sizeof(*pSrc));
-  pDst->param = NULL;
-
-  SCH_ERR_JRET(schCloneCallbackParam(pSrc->param, (SSchCallbackParamHeader **)&pDst->param));
-
-  *dst = pDst;
-
-  return TSDB_CODE_SUCCESS;
-
-_return:
-
-  taosMemoryFreeClear(pDst);
-  SCH_RET(code);
-}
-
-int32_t schCloneHbRpcCtx(SRpcCtx *pSrc, SRpcCtx *pDst) {
-  int32_t code = 0;
-  memcpy(&pDst->brokenVal, &pSrc->brokenVal, sizeof(pSrc->brokenVal));
-  pDst->brokenVal.val = NULL;
-
-  SCH_ERR_RET(schCloneSMsgSendInfo(pSrc->brokenVal.val, &pDst->brokenVal.val));
-
-  pDst->args = taosHashInit(1, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_ENTRY_LOCK);
-  if (NULL == pDst->args) {
-    qError("taosHashInit %d RpcCtx failed", 1);
-    SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-  }
-
-  SRpcCtxVal dst = {0};
-  void *pIter = taosHashIterate(pSrc->args, NULL);
-  while (pIter) {
-    SRpcCtxVal *pVal = (SRpcCtxVal *)pIter;
-    int32_t *msgType = taosHashGetKey(pIter, NULL);
-
-    dst = *pVal;
-    dst.val = NULL;
-
-    SCH_ERR_JRET(schCloneSMsgSendInfo(pVal->val, &dst.val));
-
-    if (taosHashPut(pDst->args, msgType, sizeof(*msgType), &dst, sizeof(dst))) {
-      qError("taosHashPut msg %d to rpcCtx failed", *msgType);
-      (*dst.freeFunc)(dst.val);
-      SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-    }
-
-    pIter = taosHashIterate(pSrc->args, pIter);
-  }
-
-  return TSDB_CODE_SUCCESS;
-
-_return:
-
-  schFreeRpcCtx(pDst);
-  SCH_RET(code);
-}
-
-int32_t schAsyncSendMsg(SSchJob *pJob, SSchTask *pTask, void *transport, SEpSet *epSet, int32_t msgType, void *msg,
-                        uint32_t msgSize, bool persistHandle, SRpcCtx *ctx) {
-  int32_t code = 0;
-
-  SSchTrans *trans = (SSchTrans *)transport;
-
-  SMsgSendInfo *pMsgSendInfo = NULL;
-  SCH_ERR_JRET(schGenerateTaskCallBackAHandle(pJob, pTask, msgType, &pMsgSendInfo));
-
-  pMsgSendInfo->msgInfo.pData = msg;
-  pMsgSendInfo->msgInfo.len = msgSize;
-  pMsgSendInfo->msgInfo.handle = trans->transHandle;
-  pMsgSendInfo->msgType = msgType;
-
-  qDebug("start to send %s msg to node[%d,%s,%d], refId:%" PRIx64 "instance:%p, handle:%p", TMSG_INFO(msgType),
-         ntohl(((SMsgHead *)msg)->vgId), epSet->eps[epSet->inUse].fqdn, epSet->eps[epSet->inUse].port, pJob->refId,
-         trans->transInst, trans->transHandle);
-
-  int64_t transporterId = 0;
-  code = asyncSendMsgToServerExt(trans->transInst, epSet, &transporterId, pMsgSendInfo, persistHandle, ctx);
-  if (code) {
-    SCH_ERR_JRET(code);
-  }
-
-  SCH_TASK_DLOG("req msg sent, refId:%" PRIx64 ", type:%d, %s", pJob->refId, msgType, TMSG_INFO(msgType));
-  return TSDB_CODE_SUCCESS;
-
-_return:
-
-  if (pMsgSendInfo) {
-    taosMemoryFreeClear(pMsgSendInfo->param);
-    taosMemoryFreeClear(pMsgSendInfo);
-  }
-
-  SCH_RET(code);
-}
-
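schCloneSMsgSendInfo above copies the flat struct, nulls the owned pointer, and only then deep-copies it, so a failed allocation can never leave two owners of the same buffer. The same discipline in a self-contained sketch (types are simplified stand-ins, not the real SMsgSendInfo):

```c
#include <stdlib.h>
#include <string.h>

typedef struct {
  int   msgType;
  void *param;     // owned heap buffer
  int   paramLen;
} SendInfo;

static int cloneSendInfo(const SendInfo *src, SendInfo **dst) {
  SendInfo *p = malloc(sizeof(*p));
  if (p == NULL) return -1;

  memcpy(p, src, sizeof(*p));
  p->param = NULL;  // critical: do not alias src->param before the deep copy

  p->param = malloc(src->paramLen);
  if (p->param == NULL) {
    free(p);  // src still solely owns its param; nothing double-freed
    return -1;
  }
  memcpy(p->param, src->param, src->paramLen);

  *dst = p;
  return 0;
}
```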
-int32_t schBuildAndSendHbMsg(SQueryNodeEpId *nodeEpId) {
-  SSchedulerHbReq req = {0};
-  int32_t code = 0;
-  SRpcCtx rpcCtx = {0};
-  SSchTrans trans = {0};
-  int32_t msgType = TDMT_VND_QUERY_HEARTBEAT;
-
-  req.header.vgId = nodeEpId->nodeId;
-  req.sId = schMgmt.sId;
-  memcpy(&req.epId, nodeEpId, sizeof(SQueryNodeEpId));
-
-  SSchHbTrans *hb = taosHashGet(schMgmt.hbConnections, nodeEpId, sizeof(SQueryNodeEpId));
-  if (NULL == hb) {
-    qError("taosHashGet hb connection failed, nodeId:%d, fqdn:%s, port:%d", nodeEpId->nodeId, nodeEpId->ep.fqdn,
-           nodeEpId->ep.port);
-    SCH_ERR_RET(code);
-  }
-
-  SCH_LOCK(SCH_WRITE, &hb->lock);
-  code = schCloneHbRpcCtx(&hb->rpcCtx, &rpcCtx);
-  memcpy(&trans, &hb->trans, sizeof(trans));
-  SCH_UNLOCK(SCH_WRITE, &hb->lock);
-
-  SCH_ERR_RET(code);
-
-  int32_t msgSize = tSerializeSSchedulerHbReq(NULL, 0, &req);
-  if (msgSize < 0) {
-    qError("tSerializeSSchedulerHbReq hbReq failed, size:%d", msgSize);
-    SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-  }
-  void *msg = taosMemoryCalloc(1, msgSize);
-  if (NULL == msg) {
-    qError("calloc hb req %d failed", msgSize);
-    SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-  }
-
-  if (tSerializeSSchedulerHbReq(msg, msgSize, &req) < 0) {
-    qError("tSerializeSSchedulerHbReq hbReq failed, size:%d", msgSize);
-    SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-  }
-
-  SMsgSendInfo *pMsgSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
-  if (NULL == pMsgSendInfo) {
-    qError("calloc SMsgSendInfo failed");
-    SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-  }
-
-  SSchTaskCallbackParam *param = taosMemoryCalloc(1, sizeof(SSchTaskCallbackParam));
-  if (NULL == param) {
-    qError("calloc SSchTaskCallbackParam failed");
-    SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-  }
-
-  __async_send_cb_fn_t fp = NULL;
-  SCH_ERR_JRET(schGetCallbackFp(msgType, &fp));
-
-  param->transport = trans.transInst;
-
-  pMsgSendInfo->param = param;
-  pMsgSendInfo->msgInfo.pData = msg;
-  pMsgSendInfo->msgInfo.len = msgSize;
-  pMsgSendInfo->msgInfo.handle = trans.transHandle;
-  pMsgSendInfo->msgType = msgType;
-  pMsgSendInfo->fp = fp;
-
-  int64_t transporterId = 0;
-  SEpSet epSet = {.inUse = 0, .numOfEps = 1};
-  memcpy(&epSet.eps[0], &nodeEpId->ep, sizeof(nodeEpId->ep));
-
-  qDebug("start to send hb msg, instance:%p, handle:%p, fqdn:%s, port:%d", trans.transInst, trans.transHandle,
-         nodeEpId->ep.fqdn, nodeEpId->ep.port);
-
-  code = asyncSendMsgToServerExt(trans.transInst, &epSet, &transporterId, pMsgSendInfo, true, &rpcCtx);
-  if (code) {
-    qError("fail to send hb msg, instance:%p, handle:%p, fqdn:%s, port:%d, error:%x - %s", trans.transInst,
-           trans.transHandle, nodeEpId->ep.fqdn, nodeEpId->ep.port, code, tstrerror(code));
-    SCH_ERR_JRET(code);
-  }
-
-  qDebug("hb msg sent");
-  return TSDB_CODE_SUCCESS;
-
-_return:
-
-  taosMemoryFreeClear(msg);
-  taosMemoryFreeClear(param);
-  taosMemoryFreeClear(pMsgSendInfo);
-  schFreeRpcCtx(&rpcCtx);
-  SCH_RET(code);
-}
-
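The heartbeat path above uses the common "measure, then fill" serialization idiom: tSerializeSSchedulerHbReq(NULL, 0, &req) returns the needed size, and the same function is called again with the allocated buffer. A minimal standalone version of that pattern (encodeHbReq and HbReq are hypothetical stand-ins):

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct { int64_t sId; int32_t vgId; } HbReq;  // stand-in struct

// Returns bytes needed when buf is NULL, else bytes written (-1 on overflow).
static int32_t encodeHbReq(void *buf, int32_t bufLen, const HbReq *req) {
  int32_t need = (int32_t)(sizeof(req->sId) + sizeof(req->vgId));
  if (buf == NULL) return need;
  if (bufLen < need) return -1;
  memcpy(buf, &req->sId, sizeof(req->sId));
  memcpy((char *)buf + sizeof(req->sId), &req->vgId, sizeof(req->vgId));
  return need;
}

static void *buildHbMsg(const HbReq *req, int32_t *msgLen) {
  int32_t size = encodeHbReq(NULL, 0, req);  // first pass: size only
  if (size < 0) return NULL;
  void *msg = calloc(1, size);
  if (msg == NULL) return NULL;
  if (encodeHbReq(msg, size, req) < 0) {     // second pass: fill the buffer
    free(msg);
    return NULL;
  }
  *msgLen = size;
  return msg;
}
```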
-int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr, int32_t msgType) {
-  uint32_t msgSize = 0;
-  void *msg = NULL;
-  int32_t code = 0;
-  bool isCandidateAddr = false;
-  bool persistHandle = false;
-  SRpcCtx rpcCtx = {0};
-
-  if (NULL == addr) {
-    addr = taosArrayGet(pTask->candidateAddrs, pTask->candidateIdx);
-    isCandidateAddr = true;
-  }
-
-  SEpSet epSet = addr->epSet;
-
-  switch (msgType) {
-    case TDMT_VND_CREATE_TABLE:
-    case TDMT_VND_DROP_TABLE:
-    case TDMT_VND_ALTER_TABLE:
-    case TDMT_VND_SUBMIT: {
-      msgSize = pTask->msgLen;
-      msg = taosMemoryCalloc(1, msgSize);
-      if (NULL == msg) {
-        SCH_TASK_ELOG("calloc %d failed", msgSize);
-        SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-      }
-
-      memcpy(msg, pTask->msg, msgSize);
-      break;
-    }
-
-    case TDMT_VND_QUERY: {
-      SCH_ERR_RET(schMakeQueryRpcCtx(pJob, pTask, &rpcCtx));
-
-      uint32_t len = strlen(pJob->sql);
-      msgSize = sizeof(SSubQueryMsg) + pTask->msgLen + len;
-      msg = taosMemoryCalloc(1, msgSize);
-      if (NULL == msg) {
-        SCH_TASK_ELOG("calloc %d failed", msgSize);
-        SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-      }
-
-      SSubQueryMsg *pMsg = msg;
-      pMsg->header.vgId = htonl(addr->nodeId);
-      pMsg->sId = htobe64(schMgmt.sId);
-      pMsg->queryId = htobe64(pJob->queryId);
-      pMsg->taskId = htobe64(pTask->taskId);
-      pMsg->refId = htobe64(pJob->refId);
-      pMsg->taskType = TASK_TYPE_TEMP;
-      pMsg->explain = SCH_IS_EXPLAIN_JOB(pJob);
-      pMsg->phyLen = htonl(pTask->msgLen);
-      pMsg->sqlLen = htonl(len);
-
-      memcpy(pMsg->msg, pJob->sql, len);
-      memcpy(pMsg->msg + len, pTask->msg, pTask->msgLen);
-
-      persistHandle = true;
-      break;
-    }
-
-    case TDMT_VND_RES_READY: {
-      msgSize = sizeof(SResReadyReq);
-      msg = taosMemoryCalloc(1, msgSize);
-      if (NULL == msg) {
-        SCH_TASK_ELOG("calloc %d failed", msgSize);
-        SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-      }
-
-      SResReadyReq *pMsg = msg;
-
-      pMsg->header.vgId = htonl(addr->nodeId);
-
-      pMsg->sId = htobe64(schMgmt.sId);
-      pMsg->queryId = htobe64(pJob->queryId);
-      pMsg->taskId = htobe64(pTask->taskId);
-      break;
-    }
-    case TDMT_VND_FETCH: {
-      msgSize = sizeof(SResFetchReq);
-      msg = taosMemoryCalloc(1, msgSize);
-      if (NULL == msg) {
-        SCH_TASK_ELOG("calloc %d failed", msgSize);
-        SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-      }
-
-      SResFetchReq *pMsg = msg;
-
-      pMsg->header.vgId = htonl(addr->nodeId);
-
-      pMsg->sId = htobe64(schMgmt.sId);
-      pMsg->queryId = htobe64(pJob->queryId);
-      pMsg->taskId = htobe64(pTask->taskId);
-
-      break;
-    }
-    case TDMT_VND_DROP_TASK: {
-      msgSize = sizeof(STaskDropReq);
-      msg = taosMemoryCalloc(1, msgSize);
-      if (NULL == msg) {
-        SCH_TASK_ELOG("calloc %d failed", msgSize);
-        SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-      }
-
-      STaskDropReq *pMsg = msg;
-
-      pMsg->header.vgId = htonl(addr->nodeId);
-
-      pMsg->sId = htobe64(schMgmt.sId);
-      pMsg->queryId = htobe64(pJob->queryId);
-      pMsg->taskId = htobe64(pTask->taskId);
-      pMsg->refId = htobe64(pJob->refId);
-      break;
-    }
-    case TDMT_VND_QUERY_HEARTBEAT: {
-      SCH_ERR_RET(schMakeHbRpcCtx(pJob, pTask, &rpcCtx));
-
-      SSchedulerHbReq req = {0};
-      req.sId = schMgmt.sId;
-      req.header.vgId = addr->nodeId;
-      req.epId.nodeId = addr->nodeId;
-      memcpy(&req.epId.ep, SCH_GET_CUR_EP(addr), sizeof(SEp));
-
-      msgSize = tSerializeSSchedulerHbReq(NULL, 0, &req);
-      if (msgSize < 0) {
-        SCH_JOB_ELOG("tSerializeSSchedulerHbReq hbReq failed, size:%d", msgSize);
-        SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-      }
-      msg = taosMemoryCalloc(1, msgSize);
-      if (NULL == msg) {
-        SCH_JOB_ELOG("calloc %d failed", msgSize);
-        SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-      }
-      if (tSerializeSSchedulerHbReq(msg, msgSize, &req) < 0) {
-        SCH_JOB_ELOG("tSerializeSSchedulerHbReq hbReq failed, size:%d", msgSize);
-        SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-      }
-
-      persistHandle = true;
-      break;
-    }
-    default:
-      SCH_TASK_ELOG("unknown msg type to send, msgType:%d", msgType);
-      SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR);
-      break;
-  }
-
-  SCH_SET_TASK_LASTMSG_TYPE(pTask, msgType);
-
-  SSchTrans trans = {.transInst = pJob->transport, .transHandle = SCH_GET_TASK_HANDLE(pTask)};
-  SCH_ERR_JRET(schAsyncSendMsg(pJob, pTask, &trans, &epSet, msgType, msg, msgSize, persistHandle,
-                              (rpcCtx.args ? &rpcCtx : NULL)));
-
-  if (msgType == TDMT_VND_QUERY) {
-    SCH_ERR_RET(schRecordTaskExecNode(pJob, pTask, addr, trans.transHandle));
-  }
-
-  return TSDB_CODE_SUCCESS;
-
-_return:
-
-  SCH_SET_TASK_LASTMSG_TYPE(pTask, -1);
-  schFreeRpcCtx(&rpcCtx);
-
-  taosMemoryFreeClear(msg);
-  SCH_RET(code);
-}
-
-int32_t schEnsureHbConnection(SSchJob *pJob, SSchTask *pTask) {
-  SQueryNodeAddr *addr = taosArrayGet(pTask->candidateAddrs, pTask->candidateIdx);
-  SQueryNodeEpId epId = {0};
-
-  epId.nodeId = addr->nodeId;
-  memcpy(&epId.ep, SCH_GET_CUR_EP(addr), sizeof(SEp));
-
-#if 1
-  SSchHbTrans *hb = taosHashGet(schMgmt.hbConnections, &epId, sizeof(SQueryNodeEpId));
-  if (NULL == hb) {
-    bool exist = false;
-    SCH_ERR_RET(schRegisterHbConnection(pJob, pTask, &epId, &exist));
-    if (!exist) {
-      SCH_ERR_RET(schBuildAndSendHbMsg(&epId));
-    }
-  }
-#endif
-
-  return TSDB_CODE_SUCCESS;
-}
-
-int32_t schLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) {
-  int8_t status = 0;
-  int32_t code = 0;
-
-  atomic_add_fetch_32(&pTask->level->taskLaunchedNum, 1);
-
-  if (schJobNeedToStop(pJob, &status)) {
-    SCH_TASK_DLOG("no need to launch task cause of job status, job status:%s", jobTaskStatusStr(status));
-
-    SCH_RET(atomic_load_32(&pJob->errCode));
-  }
-
-  // NOTE: race condition: the task should be put into the hash table before send msg to server
-  if (SCH_GET_TASK_STATUS(pTask) != JOB_TASK_STATUS_EXECUTING) {
-    SCH_ERR_RET(schPushTaskToExecList(pJob, pTask));
-    SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_EXECUTING);
-  }
-
-  SSubplan *plan = pTask->plan;
-
-  if (NULL == pTask->msg) {  // TODO add more detailed reason for failure
-    code = qSubPlanToString(plan, &pTask->msg, &pTask->msgLen);
-    if (TSDB_CODE_SUCCESS != code) {
-      SCH_TASK_ELOG("failed to create physical plan, code:%s, msg:%p, len:%d", tstrerror(code), pTask->msg,
-                    pTask->msgLen);
-      SCH_ERR_RET(code);
-    } else {
-      SCH_TASK_DLOGL("physical plan len:%d, %s", pTask->msgLen, pTask->msg);
-    }
-  }
-
-  SCH_ERR_RET(schSetTaskCandidateAddrs(pJob, pTask));
-
-  if (SCH_IS_QUERY_JOB(pJob)) {
-    SCH_ERR_RET(schEnsureHbConnection(pJob, pTask));
-  }
-
-  SCH_ERR_RET(schBuildAndSendMsg(pJob, pTask, NULL, plan->msgType));
-
-  return TSDB_CODE_SUCCESS;
-}
-
-// Note: no more error processing, handled in function internal
-int32_t schLaunchTask(SSchJob *pJob, SSchTask *pTask) {
-  bool enough = false;
-  int32_t code = 0;
-
-  SCH_SET_TASK_HANDLE(pTask, NULL);
-
-  if (SCH_TASK_NEED_FLOW_CTRL(pJob, pTask)) {
-    SCH_ERR_JRET(schCheckIncTaskFlowQuota(pJob, pTask, &enough));
-
-    if (enough) {
-      SCH_ERR_JRET(schLaunchTaskImpl(pJob, pTask));
-    }
-  } else {
-    SCH_ERR_JRET(schLaunchTaskImpl(pJob, pTask));
-  }
-
-  return TSDB_CODE_SUCCESS;
-
-_return:
-
-  SCH_RET(schProcessOnTaskFailure(pJob, pTask, code));
-}
-
-int32_t schLaunchLevelTasks(SSchJob *pJob, SSchLevel *level) {
-  for (int32_t i = 0; i < level->taskNum; ++i) {
-    SSchTask *pTask = taosArrayGet(level->subTasks, i);
-
-    SCH_ERR_RET(schLaunchTask(pJob, pTask));
-  }
-
-  return TSDB_CODE_SUCCESS;
-}
-
-int32_t schLaunchJob(SSchJob *pJob) {
-  SSchLevel *level = taosArrayGet(pJob->levels, pJob->levelIdx);
-
-  SCH_ERR_RET(schCheckAndUpdateJobStatus(pJob, JOB_TASK_STATUS_EXECUTING));
-
-  SCH_ERR_RET(schCheckJobNeedFlowCtrl(pJob, level));
-
-  SCH_ERR_RET(schLaunchLevelTasks(pJob, level));
-
-  return TSDB_CODE_SUCCESS;
-}
-
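schLaunchTask above gates each launch behind an optional per-node flow-control quota; a task that finds the quota full is simply not launched now (it is retried when quota frees up), while real failures fall through to one shared error path. A compact sketch of that gate (illustrative; quotaTryAcquire is a stand-in for schCheckIncTaskFlowQuota):

```c
#include <stdbool.h>
#include <stdint.h>

typedef struct { int32_t inflight, limit; } FlowQuota;

// Stand-in for schCheckIncTaskFlowQuota(): reserve a slot if one is free.
static bool quotaTryAcquire(FlowQuota *q) {
  if (q->inflight >= q->limit) return false;  // task stays parked for later
  q->inflight++;
  return true;
}

static int32_t launchTask(FlowQuota *q, bool needFlowCtrl,
                          int32_t (*launchImpl)(void *), void *task) {
  if (needFlowCtrl && !quotaTryAcquire(q)) {
    return 0;  // not an error: the task is launched later when quota frees
  }
  return launchImpl(task);
}
```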
-void schDropTaskOnExecutedNode(SSchJob *pJob, SSchTask *pTask) {
-  if (NULL == pTask->execNodes) {
-    SCH_TASK_DLOG("no exec address, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
-    return;
-  }
-
-  int32_t size = (int32_t)taosArrayGetSize(pTask->execNodes);
-
-  if (size <= 0) {
-    SCH_TASK_DLOG("task has no execNodes, no need to drop it, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
-    return;
-  }
-
-  SSchNodeInfo *nodeInfo = NULL;
-  for (int32_t i = 0; i < size; ++i) {
-    nodeInfo = (SSchNodeInfo *)taosArrayGet(pTask->execNodes, i);
-    SCH_SET_TASK_HANDLE(pTask, nodeInfo->handle);
-
-    schBuildAndSendMsg(pJob, pTask, &nodeInfo->addr, TDMT_VND_DROP_TASK);
-  }
-
-  SCH_TASK_DLOG("task has %d exec address", size);
-}
-
-void schDropTaskInHashList(SSchJob *pJob, SHashObj *list) {
-  if (!SCH_IS_NEED_DROP_JOB(pJob)) {
-    return;
-  }
-
-  void *pIter = taosHashIterate(list, NULL);
-  while (pIter) {
-    SSchTask *pTask = *(SSchTask **)pIter;
-
-    schDropTaskOnExecutedNode(pJob, pTask);
-
-    pIter = taosHashIterate(list, pIter);
-  }
-}
-
-void schDropJobAllTasks(SSchJob *pJob) {
-  schDropTaskInHashList(pJob, pJob->execTasks);
-  schDropTaskInHashList(pJob, pJob->succTasks);
-  schDropTaskInHashList(pJob, pJob->failTasks);
-}
-
-int32_t schCancelJob(SSchJob *pJob) {
-  // TODO
-  return TSDB_CODE_SUCCESS;
-  // TODO MOVE ALL TASKS FROM EXEC LIST TO FAIL LIST
-}
-
-void schCloseJobRef(void) {
-  if (!atomic_load_8((int8_t *)&schMgmt.exit)) {
-    return;
-  }
-
-  SCH_LOCK(SCH_WRITE, &schMgmt.lock);
-  if (atomic_load_32(&schMgmt.jobNum) <= 0 && schMgmt.jobRef >= 0) {
-    taosCloseRef(schMgmt.jobRef);
-    schMgmt.jobRef = -1;
-  }
-  SCH_UNLOCK(SCH_WRITE, &schMgmt.lock);
-}
-
-void schFreeJobImpl(void *job) {
-  if (NULL == job) {
-    return;
-  }
-
-  SSchJob *pJob = job;
-  uint64_t queryId = pJob->queryId;
-  int64_t refId = pJob->refId;
-
-  if (pJob->status == JOB_TASK_STATUS_EXECUTING) {
-    schCancelJob(pJob);
-  }
-
-  schDropJobAllTasks(pJob);
-
-  pJob->subPlans = NULL;  // it is a reference to pDag->pSubplans
-
-  int32_t numOfLevels = taosArrayGetSize(pJob->levels);
-  for (int32_t i = 0; i < numOfLevels; ++i) {
-    SSchLevel *pLevel = taosArrayGet(pJob->levels, i);
-
-    schFreeFlowCtrl(pLevel);
-
-    int32_t numOfTasks = taosArrayGetSize(pLevel->subTasks);
-    for (int32_t j = 0; j < numOfTasks; ++j) {
-      SSchTask *pTask = taosArrayGet(pLevel->subTasks, j);
-      schFreeTask(pTask);
-    }
-
-    taosArrayDestroy(pLevel->subTasks);
-  }
-
-  taosHashCleanup(pJob->execTasks);
-  taosHashCleanup(pJob->failTasks);
-  taosHashCleanup(pJob->succTasks);
-
-  taosArrayDestroy(pJob->levels);
-  taosArrayDestroy(pJob->nodeList);
-
-  qExplainFreeCtx(pJob->explainCtx);
-
-  if (SCH_IS_QUERY_JOB(pJob)) {
-    taosArrayDestroy((SArray *)pJob->queryRes);
-  } else {
-    tFreeSSubmitRsp((SSubmitRsp*)pJob->queryRes);
-  }
-
-  taosMemoryFreeClear(pJob->resData);
-  taosMemoryFreeClear(pJob);
-
-  qDebug("QID:0x%" PRIx64 " job freed, refId:%" PRIx64 ", pointer:%p", queryId, refId, pJob);
-
-  atomic_sub_fetch_32(&schMgmt.jobNum, 1);
-
-  schCloseJobRef();
-}
-
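schFreeJobImpl above only runs once the ref registry drops the last reference to the job; every schAcquireJob() is paired with a schReleaseJob(). A generic, self-contained sketch of that reference-counting pattern (the scheduler's real registry is taosRef, not this toy):

```c
#include <stdatomic.h>
#include <stdlib.h>

typedef struct RefObj {
  atomic_int refs;
  void (*freeFn)(struct RefObj *self);  // plays the role of schFreeJobImpl
} RefObj;

static void refAcquire(RefObj *o) { atomic_fetch_add(&o->refs, 1); }

static void refRelease(RefObj *o) {
  // the object is destroyed exactly when the last holder lets go,
  // mirroring schReleaseJob() eventually triggering schFreeJobImpl()
  if (atomic_fetch_sub(&o->refs, 1) == 1) {
    o->freeFn(o);
  }
}
```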
-static int32_t schExecJobImpl(void *transport, SArray *pNodeList, SQueryPlan *pDag, int64_t *job, const char *sql,
-                              int64_t startTs, bool syncSchedule) {
-  qDebug("QID:0x%" PRIx64 " job started", pDag->queryId);
-
-  if (pNodeList == NULL || taosArrayGetSize(pNodeList) <= 0) {
-    qDebug("QID:0x%" PRIx64 " input exec nodeList is empty", pDag->queryId);
-  }
-
-  int32_t code = 0;
-  SSchJob *pJob = NULL;
-  SCH_ERR_JRET(schInitJob(&pJob, pDag, transport, pNodeList, sql, startTs, syncSchedule));
-
-  SCH_ERR_JRET(schLaunchJob(pJob));
-
-  *job = pJob->refId;
-
-  if (syncSchedule) {
-    SCH_JOB_DLOG("will wait for rsp now, job status:%s", SCH_GET_JOB_STATUS_STR(pJob));
-    tsem_wait(&pJob->rspSem);
-  }
-
-  SCH_JOB_DLOG("job exec done, job status:%s", SCH_GET_JOB_STATUS_STR(pJob));
-
-  schReleaseJob(pJob->refId);
-
-  return TSDB_CODE_SUCCESS;
-
-_return:
-
-  schFreeJobImpl(pJob);
-  SCH_RET(code);
-}
-
-int32_t schExecStaticExplain(void *transport, SArray *pNodeList, SQueryPlan *pDag, int64_t *job, const char *sql,
-                             bool syncSchedule) {
-  qDebug("QID:0x%" PRIx64 " job started", pDag->queryId);
-
-  int32_t code = 0;
-  SSchJob *pJob = taosMemoryCalloc(1, sizeof(SSchJob));
-  if (NULL == pJob) {
-    qError("QID:%" PRIx64 " calloc %d failed", pDag->queryId, (int32_t)sizeof(SSchJob));
-    SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-  }
-
-  pJob->sql = sql;
-  pJob->attr.queryJob = true;
-  pJob->attr.explainMode = pDag->explainInfo.mode;
-  pJob->queryId = pDag->queryId;
-  pJob->subPlans = pDag->pSubplans;
-
-  SCH_ERR_JRET(qExecStaticExplain(pDag, (SRetrieveTableRsp **)&pJob->resData));
-
-  int64_t refId = taosAddRef(schMgmt.jobRef, pJob);
-  if (refId < 0) {
-    SCH_JOB_ELOG("taosAddRef job failed, error:%s", tstrerror(terrno));
-    SCH_ERR_JRET(terrno);
-  }
-
-  if (NULL == schAcquireJob(refId)) {
-    SCH_JOB_ELOG("schAcquireJob job failed, refId:%" PRIx64, refId);
-    SCH_RET(TSDB_CODE_SCH_STATUS_ERROR);
-  }
-
-  pJob->refId = refId;
-
-  SCH_JOB_DLOG("job refId:%" PRIx64, pJob->refId);
-
-  pJob->status = JOB_TASK_STATUS_PARTIAL_SUCCEED;
-  *job = pJob->refId;
-  SCH_JOB_DLOG("job exec done, job status:%s", SCH_GET_JOB_STATUS_STR(pJob));
-
-  schReleaseJob(pJob->refId);
-
-  return TSDB_CODE_SUCCESS;
-
-_return:
-
-  schFreeJobImpl(pJob);
-  SCH_RET(code);
-}
-
 int32_t schedulerInit(SSchedulerCfg *cfg) {
   if (schMgmt.jobRef >= 0) {
     qError("scheduler already initialized");
@@ -2559,177 +67,49 @@ int32_t schedulerInit(SSchedulerCfg *cfg) {
   return TSDB_CODE_SUCCESS;
 }
 
-int32_t schedulerExecJob(void *transport, SArray *nodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql,
+int32_t schedulerExecJob(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql,
                          int64_t startTs, SQueryResult *pRes) {
-  if (NULL == transport || NULL == pDag || NULL == pDag->pSubplans || NULL == pJob || NULL == pRes) {
-    SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
-  }
-
-  int32_t code = 0;
-
-  *pJob = 0;
-
-  if (EXPLAIN_MODE_STATIC == pDag->explainInfo.mode) {
-    SCH_ERR_RET(schExecStaticExplain(transport, nodeList, pDag, pJob, sql, true));
-  } else {
-    SCH_ERR_JRET(schExecJobImpl(transport, nodeList, pDag, pJob, sql, startTs, true));
-  }
-
-_return:
-
-  if (*pJob) {
-    SSchJob *job = schAcquireJob(*pJob);
-
-    pRes->code = atomic_load_32(&job->errCode);
-    pRes->numOfRows = job->resNumOfRows;
-    pRes->res = job->queryRes;
-    job->queryRes = NULL;
-
-    schReleaseJob(*pJob);
-  }
-
-  return code;
-}
-
-int32_t schedulerAsyncExecJob(void *transport, SArray *pNodeList, SQueryPlan *pDag, const char *sql, int64_t *pJob) {
-  if (NULL == transport || NULL == pDag || NULL == pDag->pSubplans || NULL == pJob) {
+  if (NULL == pTrans || NULL == pDag || NULL == pDag->pSubplans || NULL == pJob || NULL == pRes) {
     SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
   }
 
-  if (EXPLAIN_MODE_STATIC == pDag->explainInfo.mode) {
-    SCH_ERR_RET(schExecStaticExplain(transport, pNodeList, pDag, pJob, sql, false));
-  } else {
-    SCH_ERR_RET(schExecJobImpl(transport, pNodeList, pDag, pJob, sql, 0, false));
-  }
-
-  return TSDB_CODE_SUCCESS;
+  SSchResInfo resInfo = {.queryRes = pRes};
+  SCH_RET(schExecJob(pTrans, pNodeList, pDag, pJob, sql, startTs, &resInfo));
 }
 
-#if 0
-int32_t schedulerConvertDagToTaskList(SQueryPlan* pDag, SArray **pTasks) {
-  if (NULL == pDag || pDag->numOfSubplans <= 0 || LIST_LENGTH(pDag->pSubplans) == 0) {
-    SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
-  }
-
-  int32_t levelNum = LIST_LENGTH(pDag->pSubplans);
-  if (1 != levelNum) {
-    qError("invalid level num: %d", levelNum);
-    SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
-  }
-
-  SNodeListNode *plans = (SNodeListNode*)nodesListGetNode(pDag->pSubplans, 0);
-  int32_t taskNum = LIST_LENGTH(plans->pNodeList);
-  if (taskNum <= 0) {
-    qError("invalid task num: %d", taskNum);
-    SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
-  }
-
-  SArray *info = taosArrayInit(taskNum, sizeof(STaskInfo));
-  if (NULL == info) {
-    qError("taosArrayInit %d taskInfo failed", taskNum);
-    SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-  }
-
-  STaskInfo tInfo = {0};
-  char *msg = NULL;
-  int32_t msgLen = 0;
-  int32_t code = 0;
-
-  for (int32_t i = 0; i < taskNum; ++i) {
-    SSubplan *plan = (SSubplan*)nodesListGetNode(plans->pNodeList, i);
-    tInfo.addr = plan->execNode;
-
-    code = qSubPlanToString(plan, &msg, &msgLen);
-    if (TSDB_CODE_SUCCESS != code) {
-      qError("subplanToString error, code:%x, msg:%p, len:%d", code, msg, msgLen);
-      SCH_ERR_JRET(code);
-    }
-
-    int32_t msgSize = sizeof(SSubQueryMsg) + msgLen;
-    if (NULL == msg) {
-      qError("calloc %d failed", msgSize);
-      SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-    }
-
-    SSubQueryMsg* pMsg = taosMemoryCalloc(1, msgSize);
-
-    pMsg->header.vgId = tInfo.addr.nodeId;
-
-    pMsg->sId = schMgmt.sId;
-    pMsg->queryId = plan->id.queryId;
-    pMsg->taskId = schGenUUID();
-    pMsg->taskType = TASK_TYPE_PERSISTENT;
-    pMsg->phyLen = msgLen;
-    pMsg->sqlLen = 0;
-    memcpy(pMsg->msg, msg, msgLen);
-    /*memcpy(pMsg->msg, ((SSubQueryMsg*)msg)->msg, msgLen);*/
-
-    tInfo.msg = pMsg;
-
-    if (NULL == taosArrayPush(info, &tInfo)) {
-      qError("taosArrayPush failed, idx:%d", i);
-      taosMemoryFree(msg);
-      SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-    }
-  }
-
-  *pTasks = info;
-  info = NULL;
-
-_return:
-  schedulerFreeTaskList(info);
-  SCH_RET(code);
+int32_t schedulerAsyncExecJob(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql,
+                              int64_t startTs, schedulerExecCallback fp, void* param) {
+  if (NULL == pTrans || NULL == pDag || NULL == pDag->pSubplans || NULL == pJob || NULL == fp || NULL == param) {
+    SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
+  }
+
+  SSchResInfo resInfo = {.execFp = fp, .userParam = param};
+  SCH_RET(schAsyncExecJob(pTrans, pNodeList, pDag, pJob, sql, startTs, &resInfo));
 }
 
-int32_t schedulerCopyTask(STaskInfo *src, SArray **dst, int32_t copyNum) {
-  if (NULL == src || NULL == dst || copyNum <= 0) {
+int32_t schedulerFetchRows(int64_t job, void **pData) {
+  if (NULL == pData) {
     SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
   }
 
-  int32_t code = 0;
-
-  *dst = taosArrayInit(copyNum, sizeof(STaskInfo));
-  if (NULL == *dst) {
-    qError("taosArrayInit %d taskInfo failed", copyNum);
-    SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-  }
-
-  int32_t msgSize = src->msg->phyLen + sizeof(*src->msg);
-  STaskInfo info = {0};
-
-  info.addr = src->addr;
-
-  for (int32_t i = 0; i < copyNum; ++i) {
-    info.msg = taosMemoryMalloc(msgSize);
-    if (NULL == info.msg) {
-      qError("malloc %d failed", msgSize);
-      SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-    }
-
-    memcpy(info.msg, src->msg, msgSize);
-
-    info.msg->taskId = schGenUUID();
-
-    if (NULL == taosArrayPush(*dst, &info)) {
-      qError("taosArrayPush failed, idx:%d", i);
-      taosMemoryFree(info.msg);
-      SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
-    }
+  int32_t code = 0;
+  SSchJob *pJob = schAcquireJob(job);
+  if (NULL == pJob) {
+    qError("acquire job from jobRef list failed, may be dropped, refId:%" PRIx64, job);
+    SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
   }
 
-  return TSDB_CODE_SUCCESS;
-
-_return:
+  pJob->attr.syncSchedule = true;
+  pJob->userRes.fetchRes = pData;
+  code = schFetchRows(pJob);
 
-  schedulerFreeTaskList(*dst);
-  *dst = NULL;
+  schReleaseJob(job);
 
   SCH_RET(code);
 }
-#endif
 
-int32_t schedulerFetchRows(int64_t job, void **pData) {
-  if (NULL == pData) {
+int32_t schedulerAsyncFetchRows(int64_t job, schedulerFetchCallback fp, void* param) {
+  if (NULL == fp || NULL == param) {
     SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
   }
 
@@ -2740,76 +120,11 @@ int32_t schedulerFetchRows(int64_t job, void **pData) {
     SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
   }
 
-  int8_t status = SCH_GET_JOB_STATUS(pJob);
-  if (status == JOB_TASK_STATUS_DROPPING) {
-    SCH_JOB_ELOG("job is dropping, status:%s", jobTaskStatusStr(status));
-    schReleaseJob(job);
-    SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
-  }
-
-  if (!SCH_JOB_NEED_FETCH(pJob)) {
-    SCH_JOB_ELOG("no need to fetch data, status:%s", SCH_GET_JOB_STATUS_STR(pJob));
-    schReleaseJob(job);
-    SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
-  }
-
-  if (atomic_val_compare_exchange_8(&pJob->userFetch, 0, 1) != 0) {
-    SCH_JOB_ELOG("prior fetching not finished, userFetch:%d", atomic_load_8(&pJob->userFetch));
-    schReleaseJob(job);
-    SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
-  }
-
-  if (JOB_TASK_STATUS_FAILED == status || JOB_TASK_STATUS_DROPPING == status) {
-    SCH_JOB_ELOG("job failed or dropping, status:%s", jobTaskStatusStr(status));
-    SCH_ERR_JRET(atomic_load_32(&pJob->errCode));
-  } else if (status == JOB_TASK_STATUS_SUCCEED) {
-    SCH_JOB_DLOG("job already succeed, status:%s", jobTaskStatusStr(status));
-    goto _return;
-  } else if (status == JOB_TASK_STATUS_PARTIAL_SUCCEED) {
-    if (!(pJob->attr.explainMode == EXPLAIN_MODE_STATIC)) {
-      SCH_ERR_JRET(schFetchFromRemote(pJob));
-      tsem_wait(&pJob->rspSem);
-    }
-  } else {
-    SCH_JOB_ELOG("job status error for fetch, status:%s", jobTaskStatusStr(status));
-    SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
-  }
-
-  status = SCH_GET_JOB_STATUS(pJob);
-
-  if (JOB_TASK_STATUS_FAILED == status || JOB_TASK_STATUS_DROPPING == status) {
-    SCH_JOB_ELOG("job failed or dropping, status:%s", jobTaskStatusStr(status));
-    SCH_ERR_JRET(atomic_load_32(&pJob->errCode));
-  }
-
-  if (pJob->resData && ((SRetrieveTableRsp *)pJob->resData)->completed) {
-    SCH_ERR_JRET(schCheckAndUpdateJobStatus(pJob, JOB_TASK_STATUS_SUCCEED));
-  }
-
-  while (true) {
-    *pData = atomic_load_ptr(&pJob->resData);
-    if (*pData != atomic_val_compare_exchange_ptr(&pJob->resData, *pData, NULL)) {
-      continue;
-    }
-
-    break;
-  }
-
-  if (NULL == *pData) {
-    SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)taosMemoryCalloc(1, sizeof(SRetrieveTableRsp));
-    if (rsp) {
-      rsp->completed = 1;
-    }
-
-    *pData = rsp;
-    SCH_JOB_DLOG("empty res and set query complete, code:%x", code);
-  }
-
-  SCH_JOB_DLOG("fetch done, totalRows:%d, code:%s", pJob->resNumOfRows, tstrerror(code));
-
-_return:
-
-  atomic_val_compare_exchange_8(&pJob->userFetch, 1, 0);
+  pJob->attr.syncSchedule = false;
+  pJob->userRes.fetchFp = fp;
+  pJob->userRes.userParam = param;
+
+  code = schAsyncFetchRows(pJob);
 
   schReleaseJob(job);
 
@@ -2877,20 +192,6 @@ void schedulerFreeJob(int64_t job) {
 
   schReleaseJob(job);
 }
 
-void schedulerFreeTaskList(SArray *taskList) {
-  if (NULL == taskList) {
-    return;
-  }
-
-  int32_t taskNum = taosArrayGetSize(taskList);
-  for (int32_t i = 0; i < taskNum; ++i) {
-    STaskInfo *info = taosArrayGet(taskList, i);
-    taosMemoryFreeClear(info->msg);
-  }
-
-  taosArrayDestroy(taskList);
-}
-
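The reworked API surface above is callback-driven: schedulerAsyncExecJob() takes an exec callback, schedulerAsyncFetchRows() a fetch callback. A sketch of how a caller wires them together (a fragment, not standalone code: SQueryResult and the scheduler entry points come from the TDengine headers, and only the exec-callback shape is confirmed by the tests in this patch; the fetch callback's exact signature is an assumption):

```c
// Exec callback: identical in shape to schtQueryCb in the updated tests.
static void onExecDone(SQueryResult *pResult, void *param, int32_t code) {
  *(int32_t *)param = 1;  // flag completion; a semaphore would also work
}

static void runAsyncQuery(void *pTrans, SArray *qnodeList, SQueryPlan *pPlan) {
  int32_t done = 0;
  int64_t job = 0;
  int32_t code = schedulerAsyncExecJob(pTrans, qnodeList, pPlan, &job,
                                       "select * from tb", 0, onExecDone, &done);
  if (code != 0) return;
  while (!done) taosUsleep(10000);  // wait, as the updated tests do
  // rows would then be consumed via schedulerFetchRows(job, &pData),
  // or asynchronously via schedulerAsyncFetchRows(job, fetchCb, param)
}
```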
 void schedulerDestroy(void) {
   atomic_store_8((int8_t *)&schMgmt.exit, 1);
diff --git a/source/libs/scheduler/test/schedulerTests.cpp b/source/libs/scheduler/test/schedulerTests.cpp
index fc0e05aaf106fb11d8daa9be9a55e510aac58ff5..d5c834e5cf47484875d3613f461c2ae611f2d12b 100644
--- a/source/libs/scheduler/test/schedulerTests.cpp
+++ b/source/libs/scheduler/test/schedulerTests.cpp
@@ -87,6 +87,11 @@ void schtInitLogFile() {
 
 }
 
+void schtQueryCb(SQueryResult* pResult, void* param, int32_t code) {
+  assert(TSDB_CODE_SUCCESS == code);
+  *(int32_t*)param = 1;
+}
+
 void schtBuildQueryDag(SQueryPlan *dag) {
   uint64_t qId = schtQueryId;
 
@@ -485,6 +490,7 @@ void* schtRunJobThread(void *aa) {
   SHashObj *execTasks = NULL;
   SDataBuf dataBuf = {0};
   uint32_t jobFinished = 0;
+  int32_t queryDone = 0;
 
   while (!schtTestStop) {
     schtBuildQueryDag(&dag);
@@ -496,7 +502,8 @@ void* schtRunJobThread(void *aa) {
     qnodeAddr.port = 6031;
     taosArrayPush(qnodeList, &qnodeAddr);
 
-    code = schedulerAsyncExecJob(mockPointer, qnodeList, &dag, "select * from tb", &queryJobRefId);
+    queryDone = 0;
+    code = schedulerAsyncExecJob(mockPointer, qnodeList, &dag, &queryJobRefId, "select * from tb", 0, schtQueryCb, &queryDone);
     assert(code == 0);
 
     pJob = schAcquireJob(queryJobRefId);
@@ -535,27 +542,6 @@ void* schtRunJobThread(void *aa) {
 
       pIter = taosHashIterate(execTasks, pIter);
     }
-
-    param = (SSchTaskCallbackParam *)taosMemoryCalloc(1, sizeof(*param));
-    param->refId = queryJobRefId;
-    param->queryId = pJob->queryId;
-
-    pIter = taosHashIterate(execTasks, NULL);
-    while (pIter) {
-      SSchTask *task = (SSchTask *)pIter;
-
-      param->taskId = task->taskId;
-      SResReadyRsp rsp = {0};
-      dataBuf.pData = &rsp;
-      dataBuf.len = sizeof(rsp);
-
-      code = schHandleCallback(param, &dataBuf, TDMT_VND_RES_READY_RSP, 0);
-      assert(code == 0 || code);
-
-      pIter = taosHashIterate(execTasks, pIter);
-    }
-
-    param = (SSchTaskCallbackParam *)taosMemoryCalloc(1, sizeof(*param));
     param->refId = queryJobRefId;
     param->queryId = pJob->queryId;
@@ -576,24 +562,13 @@ void* schtRunJobThread(void *aa) {
 
     }
 
-    param = (SSchTaskCallbackParam *)taosMemoryCalloc(1, sizeof(*param));
-    param->refId = queryJobRefId;
-    param->queryId = pJob->queryId;
-
-    pIter = taosHashIterate(execTasks, NULL);
-    while (pIter) {
-      SSchTask *task = (SSchTask *)pIter;
-
-      param->taskId = task->taskId - 1;
-      SResReadyRsp rsp = {0};
-      dataBuf.pData = &rsp;
-      dataBuf.len = sizeof(rsp);
-
-      code = schHandleCallback(param, &dataBuf, TDMT_VND_RES_READY_RSP, 0);
-      assert(code == 0 || code);
-
-      pIter = taosHashIterate(execTasks, pIter);
-    }
+    while (true) {
+      if (queryDone) {
+        break;
+      }
+
+      taosUsleep(10000);
+    }
 
     atomic_store_32(&schtStartFetch, 1);
 
@@ -667,8 +642,9 @@ TEST(queryTest, normalCase) {
   schtSetPlanToString();
   schtSetExecNode();
   schtSetAsyncSendMsgToServer();
-
-  code = schedulerAsyncExecJob(mockPointer, qnodeList, &dag, "select * from tb", &job);
+
+  int32_t queryDone = 0;
+  code = schedulerAsyncExecJob(mockPointer, qnodeList, &dag, &job, "select * from tb", 0, schtQueryCb, &queryDone);
 
   ASSERT_EQ(code, 0);
 
@@ -685,17 +661,6 @@ TEST(queryTest, normalCase) {
     pIter = taosHashIterate(pJob->execTasks, pIter);
   }
 
-  pIter = taosHashIterate(pJob->execTasks, NULL);
-  while (pIter) {
-    SSchTask *task = *(SSchTask **)pIter;
-
-    SResReadyRsp rsp = {0};
-    code = schHandleResponseMsg(pJob, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0);
-    printf("code:%d", code);
-    ASSERT_EQ(code, 0);
-    pIter = taosHashIterate(pJob->execTasks, pIter);
-  }
-
   pIter = taosHashIterate(pJob->execTasks, NULL);
   while (pIter) {
     SSchTask *task = *(SSchTask **)pIter;
 
@@ -707,17 +672,14 @@ TEST(queryTest, normalCase) {
     pIter = taosHashIterate(pJob->execTasks, pIter);
   }
 
-  pIter = taosHashIterate(pJob->execTasks, NULL);
-  while (pIter) {
-    SSchTask *task = *(SSchTask **)pIter;
-
-    SResReadyRsp rsp = {0};
-    code = schHandleResponseMsg(pJob, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0);
-    ASSERT_EQ(code, 0);
-
-    pIter = taosHashIterate(pJob->execTasks, pIter);
-  }
+  while (true) {
+    if (queryDone) {
+      break;
+    }
 
+    taosUsleep(10000);
+  }
+
   TdThreadAttr thattr;
   taosThreadAttrInit(&thattr);
 
@@ -773,25 +735,15 @@ TEST(queryTest, readyFirstCase) {
   schtSetPlanToString();
   schtSetExecNode();
   schtSetAsyncSendMsgToServer();
-
-  code = schedulerAsyncExecJob(mockPointer, qnodeList, &dag, "select * from tb", &job);
+
+  int32_t queryDone = 0;
+  code = schedulerAsyncExecJob(mockPointer, qnodeList, &dag, &job, "select * from tb", 0, schtQueryCb, &queryDone);
 
   ASSERT_EQ(code, 0);
 
   SSchJob *pJob = schAcquireJob(job);
-
-  void *pIter = taosHashIterate(pJob->execTasks, NULL);
-  while (pIter) {
-    SSchTask *task = *(SSchTask **)pIter;
-
-    SResReadyRsp rsp = {0};
-    code = schHandleResponseMsg(pJob, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0);
-    printf("code:%d", code);
-    ASSERT_EQ(code, 0);
-    pIter = taosHashIterate(pJob->execTasks, pIter);
-  }
 
-  pIter = taosHashIterate(pJob->execTasks, NULL);
+  void *pIter = taosHashIterate(pJob->execTasks, NULL);
   while (pIter) {
     SSchTask *task = *(SSchTask **)pIter;
 
@@ -802,17 +754,6 @@ TEST(queryTest, readyFirstCase) {
     pIter = taosHashIterate(pJob->execTasks, pIter);
   }
 
-  pIter = taosHashIterate(pJob->execTasks, NULL);
-  while (pIter) {
-    SSchTask *task = *(SSchTask **)pIter;
-
-    SResReadyRsp rsp = {0};
-    code = schHandleResponseMsg(pJob, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0);
-    ASSERT_EQ(code, 0);
-
-    pIter = taosHashIterate(pJob->execTasks, pIter);
-  }
-
   pIter = taosHashIterate(pJob->execTasks, NULL);
   while (pIter) {
     SSchTask *task = *(SSchTask **)pIter;
 
@@ -824,6 +765,13 @@ TEST(queryTest, readyFirstCase) {
     pIter = taosHashIterate(pJob->execTasks, pIter);
   }
 
+  while (true) {
+    if (queryDone) {
+      break;
+    }
+
+    taosUsleep(10000);
+  }
 
   TdThreadAttr thattr;
 
@@ -885,16 +833,17 @@ TEST(queryTest, flowCtrlCase) {
   schtSetPlanToString();
   schtSetExecNode();
   schtSetAsyncSendMsgToServer();
-
-  code = schedulerAsyncExecJob(mockPointer, qnodeList, &dag, "select * from tb", &job);
+
+  int32_t queryDone = 0;
+  code = schedulerAsyncExecJob(mockPointer, qnodeList, &dag, &job, "select * from tb", 0, schtQueryCb, &queryDone);
 
   ASSERT_EQ(code, 0);
 
   SSchJob *pJob = schAcquireJob(job);
 
-  bool queryDone = false;
+  bool qDone = false;
 
-  while (!queryDone) {
+  while (!qDone) {
     void *pIter = taosHashIterate(pJob->execTasks, NULL);
     if (NULL == pIter) {
       break;
@@ -909,13 +858,9 @@ TEST(queryTest, flowCtrlCase) {
 
       SQueryTableRsp rsp = {0};
       code = schHandleResponseMsg(pJob, task, TDMT_VND_QUERY_RSP, (char *)&rsp, sizeof(rsp), 0);
-      ASSERT_EQ(code, 0);
-    } else if (task->lastMsgType == TDMT_VND_RES_READY) {
-      SResReadyRsp rsp = {0};
-      code = schHandleResponseMsg(pJob, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0);
       ASSERT_EQ(code, 0);
     } else {
-      queryDone = true;
+      qDone = true;
       break;
     }
 
@@ -923,6 +868,13 @@ TEST(queryTest, flowCtrlCase) {
     }
   }
 
+  while (true) {
+    if (queryDone) {
+      break;
+    }
+
+    taosUsleep(10000);
+  }
 
   TdThreadAttr thattr;
   taosThreadAttrInit(&thattr);
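The test updates above replace synthetic TDMT_VND_RES_READY_RSP injections with a completion flag set by the exec callback plus a sleep-poll loop. Reduced to its skeleton (illustrative only; a production test would usually prefer a semaphore over polling):

```c
#include <stdatomic.h>
#include <unistd.h>

static atomic_int queryDone;

// Passed as the exec callback; the scheduler thread flips the flag.
static void onDone(void *pResult, void *param, int code) {
  atomic_store((atomic_int *)param, 1);
}

static void waitQueryDone(void) {
  while (!atomic_load(&queryDone)) {
    usleep(10000);  // short sleep per round, same spirit as the loops above
  }
}
```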
diff --git a/source/libs/stream/src/tstream.c b/source/libs/stream/src/tstream.c
index 66a661481e8f751b7a3a030bc7b85b38c75040d5..e033645667b2f1a3953feaaaca8daf4ed4331bf8 100644
--- a/source/libs/stream/src/tstream.c
+++ b/source/libs/stream/src/tstream.c
@@ -35,6 +35,14 @@ void* streamDataBlockDecode(const void* buf, SStreamDataBlock* pInput) {
   return (void*)buf;
 }
 
+static int32_t streamBuildDispatchMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMsg, SEpSet** ppEpSet) {
+  SStreamDispatchReq req = {
+      .streamId = pTask->streamId,
+      .data = data,
+  };
+  return 0;
+}
+
 static int32_t streamBuildExecMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMsg, SEpSet** ppEpSet) {
   SStreamTaskExecReq req = {
       .streamId = pTask->streamId,
@@ -59,7 +67,7 @@ static int32_t streamBuildExecMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMs
 
   } else if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) {
     // TODO use general name rule of schemaless
-    char ctbName[TSDB_TABLE_FNAME_LEN + 22];
+    char ctbName[TSDB_TABLE_FNAME_LEN + 22] = {0};
     // all groupId must be the same in an array
     SSDataBlock* pBlock = taosArrayGet(data, 0);
     sprintf(ctbName, "%s:%ld", pTask->shuffleDispatcher.stbFullName, pBlock->info.groupId);
@@ -134,20 +142,20 @@ int32_t streamEnqueueDataBlk(SStreamTask* pTask, SStreamDataBlock* input) {
 }
 
 static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes) {
-  void* exec = pTask->exec.runners[0].executor;
+  void* exec = pTask->exec.executor;
 
   // set input
   if (pTask->inputType == STREAM_INPUT__DATA_SUBMIT) {
     SStreamDataSubmit* pSubmit = (SStreamDataSubmit*)data;
     ASSERT(pSubmit->type == STREAM_INPUT__DATA_SUBMIT);
-    qSetStreamInput(exec, pSubmit->data, STREAM_DATA_TYPE_SUBMIT_BLOCK);
+    qSetStreamInput(exec, pSubmit->data, STREAM_DATA_TYPE_SUBMIT_BLOCK, false);
   } else if (pTask->inputType == STREAM_INPUT__DATA_BLOCK) {
     SStreamDataBlock* pBlock = (SStreamDataBlock*)data;
     ASSERT(pBlock->type == STREAM_INPUT__DATA_BLOCK);
     SArray* blocks = pBlock->blocks;
-    qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_DATA_TYPE_SSDATA_BLOCK);
+    qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_DATA_TYPE_SSDATA_BLOCK, false);
   }
 
   // exec
@@ -158,109 +166,65 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes)
       ASSERT(false);
     }
     if (output == NULL) break;
-    taosArrayPush(pRes, output);
+    // TODO: do we need free memory?
+    SSDataBlock* outputCopy = createOneDataBlock(output, true);
+    taosArrayPush(pRes, outputCopy);
   }
 
   // destroy
   if (pTask->inputType == STREAM_INPUT__DATA_SUBMIT) {
     streamDataSubmitRefDec((SStreamDataSubmit*)data);
+    taosFreeQitem(data);
   } else {
     taosArrayDestroyEx(((SStreamDataBlock*)data)->blocks, (FDelete)tDeleteSSDataBlock);
+    taosFreeQitem(data);
   }
 
   return 0;
 }
 
+static SArray* streamExecForQall(SStreamTask* pTask, SArray* pRes) {
+  while (1) {
+    void* data = NULL;
+    taosGetQitem(pTask->inputQAll, &data);
+    if (data == NULL) break;
+
+    streamTaskExecImpl(pTask, data, pRes);
+
+    if (taosArrayGetSize(pRes) != 0) {
+      SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
+      qRes->type = STREAM_INPUT__DATA_BLOCK;
+      qRes->blocks = pRes;
+      taosWriteQitem(pTask->outputQ, qRes);
+      return taosArrayInit(0, sizeof(SSDataBlock));
+    }
+  }
+  return pRes;
+}
+
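streamExecForQall above drains a snapshot of the input queue and, whenever results accumulate, hands the whole batch to the output queue and re-arms a fresh result array. The same control flow in a self-contained sketch (a toy linked queue stands in for taosQueue/qall):

```c
#include <stdlib.h>

typedef struct Item { struct Item *next; int payload; } Item;
typedef struct { Item *head; } Queue;

static Item *qPop(Queue *q) {
  Item *it = q->head;
  if (it) q->head = it->next;
  return it;
}

static void qPush(Queue *q, Item *it) {
  it->next = q->head;
  q->head = it;
}

// Process every queued item; flush accumulated results as one batch,
// then keep going with an empty accumulator -- like re-arming pRes above.
static int drainAndExec(Queue *in, Queue *out, int (*execOne)(int payload)) {
  int resCnt = 0;
  for (Item *it = qPop(in); it != NULL; it = qPop(in)) {
    resCnt += execOne(it->payload);
    free(it);
    if (resCnt != 0) {
      Item *batch = malloc(sizeof(*batch));
      if (batch == NULL) return -1;   // the real code goto-FAILs here
      batch->payload = resCnt;
      qPush(out, batch);
      resCnt = 0;                     // fresh accumulator for the next batch
    }
  }
  return 0;
}
```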
 // TODO: handle version
-int32_t streamTaskExec2(SStreamTask* pTask, SMsgCb* pMsgCb) {
+int32_t streamExec(SStreamTask* pTask, SMsgCb* pMsgCb) {
   SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock));
   if (pRes == NULL) return -1;
   while (1) {
     int8_t execStatus = atomic_val_compare_exchange_8(&pTask->status, TASK_STATUS__IDLE, TASK_STATUS__EXECUTING);
-    void* exec = pTask->exec.runners[0].executor;
+    void* exec = pTask->exec.executor;
     if (execStatus == TASK_STATUS__IDLE) {
       // first run, from qall, handle failure from last exec
-      while (1) {
-        void* data = NULL;
-        taosGetQitem(pTask->inputQAll, &data);
-        if (data == NULL) break;
-
-        streamTaskExecImpl(pTask, data, pRes);
-
-        taosFreeQitem(data);
-
-        if (taosArrayGetSize(pRes) != 0) {
-          SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
-          resQ->type = STREAM_INPUT__DATA_BLOCK;
-          resQ->blocks = pRes;
-          taosWriteQitem(pTask->outputQ, resQ);
-          pRes = taosArrayInit(0, sizeof(SSDataBlock));
-          if (pRes == NULL) goto FAIL;
-        }
-      }
+      pRes = streamExecForQall(pTask, pRes);
+      if (pRes == NULL) goto FAIL;
+
       // second run, from inputQ
       taosReadAllQitems(pTask->inputQ, pTask->inputQAll);
-      while (1) {
-        void* data = NULL;
-        taosGetQitem(pTask->inputQAll, &data);
-        if (data == NULL) break;
-
-        streamTaskExecImpl(pTask, data, pRes);
-
-        taosFreeQitem(data);
-
-        if (taosArrayGetSize(pRes) != 0) {
-          SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
-          resQ->type = STREAM_INPUT__DATA_BLOCK;
-          resQ->blocks = pRes;
-          taosWriteQitem(pTask->outputQ, resQ);
-          pRes = taosArrayInit(0, sizeof(SSDataBlock));
-          if (pRes == NULL) goto FAIL;
-        }
-      }
-      // set status closing
-      atomic_store_8(&pTask->status, TASK_STATUS__CLOSING);
-      // third run, make sure all inputQ is cleared
-      taosReadAllQitems(pTask->inputQ, pTask->inputQAll);
-      while (1) {
-        void* data = NULL;
-        taosGetQitem(pTask->inputQAll, &data);
-        if (data == NULL) break;
-
-        streamTaskExecImpl(pTask, data, pRes);
-
-        taosFreeQitem(data);
-
-        if (taosArrayGetSize(pRes) != 0) {
-          SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
-          resQ->type = STREAM_INPUT__DATA_BLOCK;
-          resQ->blocks = pRes;
-          taosWriteQitem(pTask->outputQ, resQ);
-          pRes = taosArrayInit(0, sizeof(SSDataBlock));
-          if (pRes == NULL) goto FAIL;
-        }
-      }
+      pRes = streamExecForQall(pTask, pRes);
+      if (pRes == NULL) goto FAIL;
+
       // set status closing
       atomic_store_8(&pTask->status, TASK_STATUS__CLOSING);
-      // third run, make sure all inputQ is cleared
+
+      // third run, make sure inputQ and qall are cleared
       taosReadAllQitems(pTask->inputQ, pTask->inputQAll);
-      while (1) {
-        void* data = NULL;
-        taosGetQitem(pTask->inputQAll, &data);
-        if (data == NULL) break;
-
-        streamTaskExecImpl(pTask, data, pRes);
-
-        taosFreeQitem(data);
-
-        if (taosArrayGetSize(pRes) != 0) {
-          SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
-          resQ->type = STREAM_INPUT__DATA_BLOCK;
-          resQ->blocks = pRes;
-          taosWriteQitem(pTask->outputQ, resQ);
-          pRes = taosArrayInit(0, sizeof(SSDataBlock));
-          if (pRes == NULL) goto FAIL;
-        }
-      }
+      pRes = streamExecForQall(pTask, pRes);
+      if (pRes == NULL) goto FAIL;
 
       atomic_store_8(&pTask->status, TASK_STATUS__IDLE);
       break;
@@ -278,7 +242,7 @@ FAIL:
   return -1;
 }
 
-int32_t streamTaskSink(SStreamTask* pTask, SMsgCb* pMsgCb) {
+int32_t streamSink(SStreamTask* pTask, SMsgCb* pMsgCb) {
   bool firstRun = 1;
   while (1) {
     SStreamDataBlock* pBlock = NULL;
@@ -322,13 +286,13 @@ int32_t streamTaskSink(SStreamTask* pTask, SMsgCb* pMsgCb) {
     }
 
     int32_t qType;
-    if (pTask->dispatchMsgType == TDMT_VND_TASK_PIPE_EXEC || pTask->dispatchMsgType == TDMT_SND_TASK_PIPE_EXEC) {
+    if (pTask->dispatchMsgType == TDMT_VND_TASK_DISPATCH || pTask->dispatchMsgType == TDMT_SND_TASK_DISPATCH) {
       qType = FETCH_QUEUE;
-    } else if (pTask->dispatchMsgType == TDMT_VND_TASK_MERGE_EXEC ||
-               pTask->dispatchMsgType == TDMT_SND_TASK_MERGE_EXEC) {
-      qType = MERGE_QUEUE;
-    } else if (pTask->dispatchMsgType == TDMT_VND_TASK_WRITE_EXEC) {
-      qType = WRITE_QUEUE;
+      /*} else if (pTask->dispatchMsgType == TDMT_VND_TASK_MERGE_EXEC ||*/
+      /*pTask->dispatchMsgType == TDMT_SND_TASK_MERGE_EXEC) {*/
+      /*qType = MERGE_QUEUE;*/
+      /*} else if (pTask->dispatchMsgType == TDMT_VND_TASK_WRITE_EXEC) {*/
+      /*qType = WRITE_QUEUE;*/
     } else {
      ASSERT(0);
    }
@@ -407,7 +371,7 @@ int32_t streamTaskEnqueue(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg*
   return 0;
 }
 
-int32_t streamTaskProcessDispatchReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchReq* pReq, SRpcMsg* pRsp) {
+int32_t streamProcessDispatchReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchReq* pReq, SRpcMsg* pRsp) {
   // 1. handle input
   streamTaskEnqueue(pTask, pReq, pRsp);
 
@@ -415,169 +379,59 @@ int32_t streamTaskProcessDispatchReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStream
   // 2.1. idle: exec
   // 2.2. executing: return
   // 2.3. closing: keep trying
-  streamTaskExec2(pTask, pMsgCb);
+  streamExec(pTask, pMsgCb);
 
   // 3. handle output
   // 3.1 check and set status
   // 3.2 dispatch / sink
-  streamTaskSink(pTask, pMsgCb);
+  streamSink(pTask, pMsgCb);
 
   return 0;
 }
 
-int32_t streamTaskProcessDispatchRsp(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchRsp* pRsp) {
+int32_t streamProcessDispatchRsp(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchRsp* pRsp) {
   atomic_store_8(&pTask->inputStatus, pRsp->inputStatus);
   if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) {
     // TODO: init recover timer
   }
   // continue dispatch
-  streamTaskSink(pTask, pMsgCb);
+  streamSink(pTask, pMsgCb);
   return 0;
 }
 
 int32_t streamTaskProcessRunReq(SStreamTask* pTask, SMsgCb* pMsgCb) {
-  streamTaskExec2(pTask, pMsgCb);
-  streamTaskSink(pTask, pMsgCb);
+  streamExec(pTask, pMsgCb);
+  streamSink(pTask, pMsgCb);
   return 0;
 }
 
-int32_t streamTaskProcessRecoverReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamTaskRecoverReq* pReq, SRpcMsg* pMsg) {
+int32_t streamProcessRecoverReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamTaskRecoverReq* pReq, SRpcMsg* pMsg) {
   //
   return 0;
 }
 
-int32_t streamTaskProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp) {
+int32_t streamProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp) {
   //
   return 0;
 }
 
-int32_t streamExecTask(SStreamTask* pTask, SMsgCb* pMsgCb, const void* input, int32_t inputType, int32_t workId) {
-  SArray* pRes = NULL;
-  // source
-  if (inputType == STREAM_DATA_TYPE_SUBMIT_BLOCK && pTask->sourceType != TASK_SOURCE__SCAN) return 0;
-
-  // exec
-  if (pTask->execType != TASK_EXEC__NONE) {
-    ASSERT(workId < pTask->exec.numOfRunners);
-    void* exec = pTask->exec.runners[workId].executor;
-    pRes = taosArrayInit(0, sizeof(SSDataBlock));
-    if (pRes == NULL) {
-      return -1;
-    }
-    if (inputType == STREAM_DATA_TYPE_SUBMIT_BLOCK) {
-      qSetStreamInput(exec, input, inputType);
-      while (1) {
-        SSDataBlock* output;
-        uint64_t ts;
-        if (qExecTask(exec, &output, &ts) < 0) {
-          ASSERT(false);
-        }
-        if (output == NULL) {
-          break;
-        }
-        taosArrayPush(pRes, output);
-      }
-    } else if (inputType == STREAM_DATA_TYPE_SSDATA_BLOCK) {
-      const SArray* blocks = (const SArray*)input;
-      /*int32_t sz = taosArrayGetSize(blocks);*/
-      /*for (int32_t i = 0; i < sz; i++) {*/
-      /*SSDataBlock* pBlock = taosArrayGet(blocks, i);*/
-      /*qSetStreamInput(exec, pBlock, inputType);*/
-      qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_DATA_TYPE_SSDATA_BLOCK);
-      while (1) {
-        SSDataBlock* output;
-        uint64_t ts;
-        if (qExecTask(exec, &output, &ts) < 0) {
-          ASSERT(false);
-        }
-        if (output == NULL) {
-          break;
-        }
-        taosArrayPush(pRes, output);
-      }
-      /*}*/
-    } else {
-      ASSERT(0);
-    }
-  } else {
-    ASSERT(inputType == STREAM_DATA_TYPE_SSDATA_BLOCK);
-    pRes = (SArray*)input;
-  }
-
-  if (pRes == NULL || taosArrayGetSize(pRes) == 0) return 0;
-
-  // sink
-  if (pTask->sinkType == TASK_SINK__TABLE) {
-    // blockDebugShowData(pRes);
-    pTask->tbSink.tbSinkFunc(pTask, pTask->tbSink.vnode, 0, pRes);
-  } else if (pTask->sinkType == TASK_SINK__SMA) {
-    pTask->smaSink.smaSink(pTask->ahandle, pTask->smaSink.smaId, pRes);
-    //
-  } else if (pTask->sinkType == TASK_SINK__FETCH) {
-    //
-  } else {
-    ASSERT(pTask->sinkType == TASK_SINK__NONE);
-  }
-
-  // dispatch
-
-  if (pTask->dispatchType == TASK_DISPATCH__INPLACE) {
-    SRpcMsg dispatchMsg = {0};
-    if (streamBuildExecMsg(pTask, pRes, &dispatchMsg, NULL) < 0) {
-      ASSERT(0);
-      return -1;
-    }
-
-    int32_t qType;
-    if (pTask->dispatchMsgType == TDMT_VND_TASK_PIPE_EXEC || pTask->dispatchMsgType == TDMT_SND_TASK_PIPE_EXEC) {
-      qType = FETCH_QUEUE;
-    } else if (pTask->dispatchMsgType == TDMT_VND_TASK_MERGE_EXEC ||
-               pTask->dispatchMsgType == TDMT_SND_TASK_MERGE_EXEC) {
-      qType = MERGE_QUEUE;
-    } else if (pTask->dispatchMsgType == TDMT_VND_TASK_WRITE_EXEC) {
-      qType = WRITE_QUEUE;
-    } else {
-      ASSERT(0);
-    }
-    tmsgPutToQueue(pMsgCb, qType, &dispatchMsg);
-
-  } else if (pTask->dispatchType == TASK_DISPATCH__FIXED) {
-    SRpcMsg dispatchMsg = {0};
-    SEpSet* pEpSet = NULL;
-    if (streamBuildExecMsg(pTask, pRes, &dispatchMsg, &pEpSet) < 0) {
-      ASSERT(0);
-      return -1;
-    }
-
-    tmsgSendReq(pEpSet, &dispatchMsg);
-
-  } else if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) {
-    SHashObj* pShuffleRes = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
-    if (pShuffleRes == NULL) {
-      return -1;
-    }
-
-    int32_t sz = taosArrayGetSize(pRes);
-    for (int32_t i = 0; i < sz; i++) {
-      SSDataBlock* pDataBlock = taosArrayGet(pRes, i);
-      SArray* pArray = taosHashGet(pShuffleRes, &pDataBlock->info.groupId, sizeof(int64_t));
-      if (pArray == NULL) {
-        pArray = taosArrayInit(0, sizeof(SSDataBlock));
-        if (pArray == NULL) {
-          return -1;
-        }
-        taosHashPut(pShuffleRes, &pDataBlock->info.groupId, sizeof(int64_t), &pArray, sizeof(void*));
-      }
-      taosArrayPush(pArray, pDataBlock);
-    }
-
-    if (streamShuffleDispatch(pTask, pMsgCb, pShuffleRes) < 0) {
-      return -1;
-    }
+int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* pReq) {
+  if (tStartEncode(pEncoder) < 0) return -1;
+  if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
+  if (tEncodeI32(pEncoder, pReq->taskId) < 0) return -1;
+  if (tEncodeI32(pEncoder, pReq->sourceTaskId) < 0) return -1;
+  if (tEncodeI32(pEncoder, pReq->sourceVg) < 0) return -1;
+  tEndEncode(pEncoder);
+  return 0;
+}
 
-  } else {
-    ASSERT(pTask->dispatchType == TASK_DISPATCH__NONE);
-  }
+int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) {
+  if (tStartDecode(pDecoder) < 0) return -1;
+  if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1;
+  if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1;
+  if (tDecodeI32(pDecoder, &pReq->sourceTaskId) < 0) return -1;
+  if (tDecodeI32(pDecoder, &pReq->sourceVg) < 0) return -1;
+  tEndDecode(pDecoder);
 
   return 0;
 }
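tEncodeStreamDispatchReq and tDecodeStreamDispatchReq above are strict mirrors: every field is written and read in exactly the same order. A self-contained illustration of that symmetry (a plain byte-buffer codec stands in for SEncoder/SDecoder, which additionally do start/end framing):

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef struct { int64_t streamId; int32_t taskId; } DispatchReq;

static size_t encodeReq(uint8_t *buf, const DispatchReq *r) {
  memcpy(buf, &r->streamId, sizeof(r->streamId));
  memcpy(buf + sizeof(r->streamId), &r->taskId, sizeof(r->taskId));
  return sizeof(r->streamId) + sizeof(r->taskId);
}

static size_t decodeReq(const uint8_t *buf, DispatchReq *r) {
  // must consume fields in exactly the order encodeReq wrote them
  memcpy(&r->streamId, buf, sizeof(r->streamId));
  memcpy(&r->taskId, buf + sizeof(r->streamId), sizeof(r->taskId));
  return sizeof(r->streamId) + sizeof(r->taskId);
}
```

The real tStartEncode/tEndEncode pair adds length framing and versioning on top of this; the sketch keeps only the ordering invariant that makes the two functions a roundtrip.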
@@ -607,20 +461,7 @@ SStreamTask* tNewSStreamTask(int64_t streamId) {
   pTask->streamId = streamId;
   pTask->status = TASK_STATUS__IDLE;
 
-  pTask->inputQ = taosOpenQueue();
-  pTask->outputQ = taosOpenQueue();
-  pTask->inputQAll = taosAllocateQall();
-  pTask->outputQAll = taosAllocateQall();
-  if (pTask->inputQ == NULL || pTask->outputQ == NULL || pTask->inputQAll == NULL || pTask->outputQAll == NULL)
-    goto FAIL;
   return pTask;
-FAIL:
-  if (pTask->inputQ) taosCloseQueue(pTask->inputQ);
-  if (pTask->outputQ) taosCloseQueue(pTask->outputQ);
-  if (pTask->inputQAll) taosFreeQall(pTask->inputQAll);
-  if (pTask->outputQAll) taosFreeQall(pTask->outputQAll);
-  if (pTask) taosMemoryFree(pTask);
-  return NULL;
 }
 
 int32_t tEncodeSStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) {
@@ -645,6 +486,7 @@ int32_t tEncodeSStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) {
 
   if (pTask->sinkType == TASK_SINK__TABLE) {
     if (tEncodeI64(pEncoder, pTask->tbSink.stbUid) < 0) return -1;
+    if (tEncodeCStr(pEncoder, pTask->tbSink.stbFullName) < 0) return -1;
     if (tEncodeSSchemaWrapper(pEncoder, pTask->tbSink.pSchemaWrapper) < 0) return -1;
   } else if (pTask->sinkType == TASK_SINK__SMA) {
     if (tEncodeI64(pEncoder, pTask->smaSink.smaId) < 0) return -1;
@@ -691,6 +533,7 @@ int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask) {
 
   if (pTask->sinkType == TASK_SINK__TABLE) {
     if (tDecodeI64(pDecoder, &pTask->tbSink.stbUid) < 0) return -1;
+    if (tDecodeCStrTo(pDecoder, pTask->tbSink.stbFullName) < 0) return -1;
     pTask->tbSink.pSchemaWrapper = taosMemoryCalloc(1, sizeof(SSchemaWrapper));
     if (pTask->tbSink.pSchemaWrapper == NULL) return -1;
     if (tDecodeSSchemaWrapper(pDecoder, pTask->tbSink.pSchemaWrapper) < 0) return -1;
@@ -722,11 +565,7 @@ void tFreeSStreamTask(SStreamTask* pTask) {
   taosCloseQueue(pTask->outputQ);
   // TODO
   if (pTask->exec.qmsg) taosMemoryFree(pTask->exec.qmsg);
-  for (int32_t i = 0; i < pTask->exec.numOfRunners; i++) {
-    qDestroyTask(pTask->exec.runners[i].executor);
-  }
-  taosMemoryFree(pTask->exec.runners);
-  /*taosMemoryFree(pTask->executor);*/
+  qDestroyTask(pTask->exec.executor);
   taosMemoryFree(pTask);
 }
 
diff --git a/source/libs/stream/src/tstreamUpdate.c b/source/libs/stream/src/tstreamUpdate.c
index 7921193fa2ca0500afc1467f0e05af267a21aa6c..7587fcecc99962b2cd0eda135a121acb281a1a48 100644
--- a/source/libs/stream/src/tstreamUpdate.c
+++ b/source/libs/stream/src/tstreamUpdate.c
@@ -19,18 +19,24 @@
 #define DEFAULT_FALSE_POSITIVE 0.01
 #define DEFAULT_BUCKET_SIZE 1024
 #define ROWS_PER_MILLISECOND 1
-#define MAX_NUM_SCALABLE_BF 120
+#define MAX_NUM_SCALABLE_BF 100000
 #define MIN_NUM_SCALABLE_BF 10
 #define DEFAULT_PREADD_BUCKET 1
 #define MAX_INTERVAL MILLISECOND_PER_MINUTE
 #define MIN_INTERVAL (MILLISECOND_PER_SECOND * 10)
+#define DEFAULT_EXPECTED_ENTRIES 10000
+
+static int64_t adjustExpEntries(int64_t entries) {
+  return TMIN(DEFAULT_EXPECTED_ENTRIES, entries);
+}
 
 static void windowSBfAdd(SUpdateInfo *pInfo, uint64_t count) {
   if (pInfo->numSBFs < count) {
     count = pInfo->numSBFs;
   }
   for (uint64_t i = 0; i < count; ++i) {
-    SScalableBf *tsSBF = tScalableBfInit(pInfo->interval * ROWS_PER_MILLISECOND, DEFAULT_FALSE_POSITIVE);
+    int64_t rows = adjustExpEntries(pInfo->interval * ROWS_PER_MILLISECOND);
+    SScalableBf *tsSBF = tScalableBfInit(rows, DEFAULT_FALSE_POSITIVE);
     taosArrayPush(pInfo->pTsSBFs, &tsSBF);
   }
 }
@@ -38,9 +44,9 @@ static void windowSBfAdd(SUpdateInfo *pInfo, uint64_t count) {
 static void windowSBfDelete(SUpdateInfo *pInfo, uint64_t count) {
   if (count < pInfo->numSBFs - 1) {
     for (uint64_t i = 0; i < count; ++i) {
-      SScalableBf *pTsSBFs = taosArrayGetP(pInfo->pTsSBFs, i);
+      SScalableBf *pTsSBFs = taosArrayGetP(pInfo->pTsSBFs, 0);
       tScalableBfDestroy(pTsSBFs);
-      taosArrayRemove(pInfo->pTsSBFs, i);
+      taosArrayRemove(pInfo->pTsSBFs, 0);
     }
   } else {
     taosArrayClearP(pInfo->pTsSBFs, (FDelete)tScalableBfDestroy);
@@ -66,12 +72,14 @@ static int64_t adjustInterval(int64_t interval, int32_t precision) {
   return val;
 }
 
-static int64_t adjustWatermark(int64_t interval, int32_t watermark) {
-  if (watermark <= 0 || watermark > MAX_NUM_SCALABLE_BF * interval) {
-    watermark = MAX_NUM_SCALABLE_BF * interval;
-  } else if (watermark < MIN_NUM_SCALABLE_BF * interval) {
-    watermark = MIN_NUM_SCALABLE_BF * interval;
-  }
+static int64_t adjustWatermark(int64_t adjInterval, int64_t originInt, int64_t watermark) {
+  if (watermark <= 0) {
+    watermark = TMIN(originInt/adjInterval, 1) * adjInterval;
+  } else if (watermark > MAX_NUM_SCALABLE_BF * adjInterval) {
+    watermark = MAX_NUM_SCALABLE_BF * adjInterval;
+  }/* else if (watermark < MIN_NUM_SCALABLE_BF * adjInterval) {
+    watermark = MIN_NUM_SCALABLE_BF * adjInterval;
+  }*/
+  // Todo(liuyao) save window info to tdb
   return watermark;
 }
 
@@ -88,7 +96,7 @@ SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t waterma
 
   pInfo->pTsSBFs = NULL;
   pInfo->minTS = -1;
   pInfo->interval = adjustInterval(interval, precision);
-  pInfo->watermark = adjustWatermark(pInfo->interval, watermark);
+  pInfo->watermark = adjustWatermark(pInfo->interval, interval, watermark);
 
   uint64_t bfSize = (uint64_t)(pInfo->watermark / pInfo->interval);
 
@@ -121,7 +129,10 @@ static SScalableBf *getSBf(SUpdateInfo *pInfo, TSKEY ts) {
   if (pInfo->minTS < 0) {
     pInfo->minTS = (TSKEY)(ts / pInfo->interval * pInfo->interval);
   }
-  uint64_t index = (uint64_t)((ts - pInfo->minTS) / pInfo->interval);
+  int64_t index = (int64_t)((ts - pInfo->minTS) / pInfo->interval);
+  if (index < 0) {
+    return NULL;
+  }
   if (index >= pInfo->numSBFs) {
     uint64_t count = index + 1 - pInfo->numSBFs;
     windowSBfDelete(pInfo, count);
@@ -130,7 +141,8 @@ static SScalableBf *getSBf(SUpdateInfo *pInfo, TSKEY ts) {
   }
   SScalableBf *res = taosArrayGetP(pInfo->pTsSBFs, index);
   if (res == NULL) {
-    res = tScalableBfInit(pInfo->interval * ROWS_PER_MILLISECOND, DEFAULT_FALSE_POSITIVE);
+    int64_t rows = adjustExpEntries(pInfo->interval * ROWS_PER_MILLISECOND);
+    res = tScalableBfInit(rows, DEFAULT_FALSE_POSITIVE);
     taosArrayPush(pInfo->pTsSBFs, &res);
   }
   return res;
@@ -139,13 +151,18 @@ bool updateInfoIsUpdated(SUpdateInfo *pInfo, tb_uid_t tableId, TSKEY ts) {
   int32_t res = TSDB_CODE_FAILED;
   uint64_t index = ((uint64_t)tableId) % pInfo->numBuckets;
+  TSKEY maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index);
+  if (ts < maxTs - pInfo->watermark) {
+    // this window has been closed.
+    return true;
+  }
+
   SScalableBf *pSBf = getSBf(pInfo, ts);
   // pSBf may be a null pointer
   if (pSBf) {
     res = tScalableBfPut(pSBf, &ts, sizeof(TSKEY));
   }
 
-  TSKEY maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index);
   if (maxTs < ts) {
     taosArraySet(pInfo->pTsBuckets, index, &ts);
     return false;
diff --git a/source/libs/stream/test/tstreamUpdateTest.cpp b/source/libs/stream/test/tstreamUpdateTest.cpp
index c1e4e2bec1e68c6072e3393140b8a806a8504e9a..93e114db020591dc703b2876cbecc25e1c363011 100644
--- a/source/libs/stream/test/tstreamUpdateTest.cpp
+++ b/source/libs/stream/test/tstreamUpdateTest.cpp
@@ -4,6 +4,7 @@
 #include "ttime.h"
 
 using namespace std;
+#define MAX_NUM_SCALABLE_BF 100000
 
 TEST(TD_STREAM_UPDATE_TEST, update) {
   int64_t interval = 20 * 1000;
@@ -91,11 +92,11 @@ TEST(TD_STREAM_UPDATE_TEST, update) {
   }
 
   SUpdateInfo *pSU4 = updateInfoInit(-1, TSDB_TIME_PRECISION_MILLI, -1);
-  GTEST_ASSERT_EQ(pSU4->watermark, 120 * pSU4->interval);
+  GTEST_ASSERT_EQ(pSU4->watermark, MAX_NUM_SCALABLE_BF * pSU4->interval);
   GTEST_ASSERT_EQ(pSU4->interval, MILLISECOND_PER_MINUTE);
 
   SUpdateInfo *pSU5 = updateInfoInit(0, TSDB_TIME_PRECISION_MILLI, 0);
-  GTEST_ASSERT_EQ(pSU5->watermark, 120 * pSU4->interval);
+  GTEST_ASSERT_EQ(pSU5->watermark, MAX_NUM_SCALABLE_BF * pSU4->interval);
   GTEST_ASSERT_EQ(pSU5->interval, MILLISECOND_PER_MINUTE);
 
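The new updateInfoIsUpdated logic above first short-circuits on the watermark (anything older than maxTs - watermark belongs to an already-closed window) and only then consults the per-window bloom filter. The decision flow in a self-contained sketch (the bloom filter is reduced to a toy single-hash bitset; the real tScalableBf grows and has tunable false-positive rates):

```c
#include <stdbool.h>
#include <stdint.h>

#define BF_BITS 4096

typedef struct {
  int64_t maxTs;      // newest timestamp seen for this bucket
  int64_t watermark;  // tolerated out-of-order window
  uint8_t bf[BF_BITS / 8];
} UpdateInfo;

// Toy stand-in for tScalableBfPut: returns whether ts was (probably) seen.
static bool bfTestAndSet(uint8_t *bf, int64_t ts) {
  uint32_t h = (uint32_t)((uint64_t)ts * 2654435761u) % BF_BITS;
  bool seen = (bf[h >> 3] >> (h & 7)) & 1;
  bf[h >> 3] |= (uint8_t)(1u << (h & 7));
  return seen;
}

static bool isUpdated(UpdateInfo *info, int64_t ts) {
  if (ts < info->maxTs - info->watermark) {
    return true;  // window already closed: late data is treated as an update
  }
  bool seen = bfTestAndSet(info->bf, ts);
  if (ts > info->maxTs) {
    info->maxTs = ts;
    return false;  // strictly newer data is never an update
  }
  return seen;     // within the window: the filter decides
}
```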
-55,6 +55,8 @@ typedef struct SVotesRespond SVotesRespond; typedef struct SSyncIndexMgr SSyncIndexMgr; typedef struct SRaftCfg SRaftCfg; typedef struct SSyncRespMgr SSyncRespMgr; +typedef struct SSyncSnapshotSender SSyncSnapshotSender; +typedef struct SSyncSnapshotReceiver SSyncSnapshotReceiver; typedef struct SSyncNode { // init by SSyncInfo @@ -147,6 +149,13 @@ typedef struct SSyncNode { // tools SSyncRespMgr* pSyncRespMgr; + // restore state + // sem_t restoreSem; + bool restoreFinish; + SSnapshot* pSnapshot; + SSyncSnapshotSender* pSender; + SSyncSnapshotReceiver* pReceiver; + } SSyncNode; // open/close -------------- @@ -177,7 +186,7 @@ int32_t syncNodeSendMsgByInfo(const SNodeInfo* nodeInfo, SSyncNode* pSyncNode, S cJSON* syncNode2Json(const SSyncNode* pSyncNode); char* syncNode2Str(const SSyncNode* pSyncNode); char* syncNode2SimpleStr(const SSyncNode* pSyncNode); -void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* newConfig); +void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* newConfig, bool* isDrop); SSyncNode* syncNodeAcquire(int64_t rid); void syncNodeRelease(SSyncNode* pNode); diff --git a/source/libs/sync/inc/syncRaftCfg.h b/source/libs/sync/inc/syncRaftCfg.h index bfc64cb7b6b02f4a693ccc82117f57c77bf7f82c..1061e8bdc4b248511eb3a580b76056cbc830f02b 100644 --- a/source/libs/sync/inc/syncRaftCfg.h +++ b/source/libs/sync/inc/syncRaftCfg.h @@ -27,10 +27,13 @@ extern "C" { #include "syncInt.h" #include "taosdef.h" +#define CONFIG_FILE_LEN 1024 + typedef struct SRaftCfg { SSyncCfg cfg; TdFilePtr pFile; char path[TSDB_FILENAME_LEN * 2]; + int8_t isStandBy; } SRaftCfg; SRaftCfg *raftCfgOpen(const char *path); @@ -42,10 +45,12 @@ char * syncCfg2Str(SSyncCfg *pSyncCfg); int32_t syncCfgFromJson(const cJSON *pRoot, SSyncCfg *pSyncCfg); int32_t syncCfgFromStr(const char *s, SSyncCfg *pSyncCfg); -cJSON *raftCfg2Json(SRaftCfg *pRaftCfg); -char * raftCfg2Str(SRaftCfg *pRaftCfg); +cJSON * raftCfg2Json(SRaftCfg *pRaftCfg); +char * raftCfg2Str(SRaftCfg *pRaftCfg); +int32_t raftCfgFromJson(const cJSON *pRoot, SRaftCfg *pRaftCfg); +int32_t raftCfgFromStr(const char *s, SRaftCfg *pRaftCfg); -int32_t syncCfgCreateFile(SSyncCfg *pCfg, const char *path); +int32_t raftCfgCreateFile(SSyncCfg *pCfg, int8_t isStandBy, const char *path); // for debug ---------------------- void syncCfgPrint(SSyncCfg *pCfg); diff --git a/source/libs/sync/inc/syncRaftLog.h b/source/libs/sync/inc/syncRaftLog.h index 7db62e14d597608f04fd313e597251ec2503f933..df5cd3f36c4138e608e70bd22972d54baff48a50 100644 --- a/source/libs/sync/inc/syncRaftLog.h +++ b/source/libs/sync/inc/syncRaftLog.h @@ -32,20 +32,21 @@ typedef struct SSyncLogStoreData { SWal* pWal; } SSyncLogStoreData; -SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode); -void logStoreDestory(SSyncLogStore* pLogStore); -int32_t logStoreAppendEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry); -SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index); -int32_t logStoreTruncate(SSyncLogStore* pLogStore, SyncIndex fromIndex); -SyncIndex logStoreLastIndex(SSyncLogStore* pLogStore); -SyncTerm logStoreLastTerm(SSyncLogStore* pLogStore); -int32_t logStoreUpdateCommitIndex(SSyncLogStore* pLogStore, SyncIndex index); -SyncIndex logStoreGetCommitIndex(SSyncLogStore* pLogStore); -SSyncRaftEntry* logStoreGetLastEntry(SSyncLogStore* pLogStore); -cJSON* logStore2Json(SSyncLogStore* pLogStore); -char* logStore2Str(SSyncLogStore* pLogStore); -cJSON* logStoreSimple2Json(SSyncLogStore* pLogStore); -char* logStoreSimple2Str(SSyncLogStore* pLogStore); 
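/*
 * Only logStoreCreate/logStoreDestory and the debug serializers survive in
 * this header; the read/append/truncate helpers removed above become static
 * inside syncRaftLog.c and are reached through function pointers on
 * SSyncLogStore, e.g. ths->pLogStore->getEntry(...) in the syncAppendEntries.c
 * hunks further down. A minimal sketch of that pattern -- only getEntry and
 * getLastIndex appear in this patch; the struct name and the rest of the
 * layout here are illustrative, not the exact TDengine definition:
 */
#include <stdint.h>
typedef int64_t SyncIndex;                    /* as in the sync headers  */
typedef struct SSyncRaftEntry SSyncRaftEntry; /* opaque for this sketch  */

typedef struct SLogStoreSketch {
  void *data; /* private backend state, e.g. the SWal wrapper */
  SSyncRaftEntry *(*getEntry)(struct SLogStoreSketch *pStore, SyncIndex index);
  SyncIndex (*getLastIndex)(struct SLogStoreSketch *pStore);
} SLogStoreSketch;

/* logStoreCreate() would wire the file-local functions in once:
 *   pStore->getEntry     = logStoreGetEntry;
 *   pStore->getLastIndex = logStoreLastIndex;                            */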
+SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode); +void logStoreDestory(SSyncLogStore* pLogStore); +cJSON* logStore2Json(SSyncLogStore* pLogStore); +char* logStore2Str(SSyncLogStore* pLogStore); +cJSON* logStoreSimple2Json(SSyncLogStore* pLogStore); +char* logStoreSimple2Str(SSyncLogStore* pLogStore); + +// SSyncRaftEntry* logStoreGetLastEntry(SSyncLogStore* pLogStore); +// SyncIndex logStoreLastIndex(SSyncLogStore* pLogStore); +// SyncTerm logStoreLastTerm(SSyncLogStore* pLogStore); +// SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index); +// int32_t logStoreAppendEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry); +// int32_t logStoreTruncate(SSyncLogStore* pLogStore, SyncIndex fromIndex); +// int32_t logStoreUpdateCommitIndex(SSyncLogStore* pLogStore, SyncIndex index); +// SyncIndex logStoreGetCommitIndex(SSyncLogStore* pLogStore); // for debug void logStorePrint(SSyncLogStore* pLogStore); diff --git a/source/libs/sync/inc/syncSnapshot.h b/source/libs/sync/inc/syncSnapshot.h index fd2119ce659b553124aa9a310c3790b29363628c..43d1c0c0c38bc9836fdb9e3210f141af44376700 100644 --- a/source/libs/sync/inc/syncSnapshot.h +++ b/source/libs/sync/inc/syncSnapshot.h @@ -23,11 +23,42 @@ extern "C" { #include #include #include +#include "cJSON.h" #include "syncInt.h" #include "taosdef.h" -int32_t takeSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot); -int32_t restoreSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot); +typedef struct SSyncSnapshotSender { + int32_t sending; + int32_t received; + bool finish; + void * pCurrentBlock; + int32_t blockLen; + int64_t sendingMS; + SSnapshot *pSnapshot; + SSyncNode *pSyncNode; +} SSyncSnapshotSender; + +SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode); +void snapshotSenderDestroy(SSyncSnapshotSender *pSender); +int32_t snapshotSend(SSyncSnapshotSender *pSender); +cJSON * snapshotSender2Json(SSyncSnapshotSender *pSender); +char * snapshotSender2Str(SSyncSnapshotSender *pSender); + +typedef struct SSyncSnapshotReceiver { + bool start; + int32_t received; + int32_t progressIndex; + void * pCurrentBlock; + int32_t len; + SSnapshot *pSnapshot; + SSyncNode *pSyncNode; +} SSyncSnapshotReceiver; + +SSyncSnapshotReceiver *snapshotReceiverCreate(SSyncNode *pSyncNode); +void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver); +int32_t snapshotReceive(SSyncSnapshotReceiver *pReceiver); +cJSON * snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver); +char * snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver); #ifdef __cplusplus } diff --git a/source/libs/sync/inc/syncVoteMgr.h b/source/libs/sync/inc/syncVoteMgr.h index 5bc240e9219a8bd1402683e1025ee15f32048e6b..716d2f620c09bdf0b842f7661e5f238d2821644f 100644 --- a/source/libs/sync/inc/syncVoteMgr.h +++ b/source/libs/sync/inc/syncVoteMgr.h @@ -42,6 +42,7 @@ typedef struct SVotesGranted { SVotesGranted *voteGrantedCreate(SSyncNode *pSyncNode); void voteGrantedDestroy(SVotesGranted *pVotesGranted); +void voteGrantedUpdate(SVotesGranted *pVotesGranted, SSyncNode *pSyncNode); bool voteGrantedMajority(SVotesGranted *pVotesGranted); void voteGrantedVote(SVotesGranted *pVotesGranted, SyncRequestVoteReply *pMsg); void voteGrantedReset(SVotesGranted *pVotesGranted, SyncTerm term); @@ -65,6 +66,7 @@ typedef struct SVotesRespond { SVotesRespond *votesRespondCreate(SSyncNode *pSyncNode); void votesRespondDestory(SVotesRespond *pVotesRespond); +void votesRespondUpdate(SVotesRespond *pVotesRespond, SSyncNode *pSyncNode); bool votesResponded(SVotesRespond *pVotesRespond, const SRaftId 
*pRaftId); void votesRespondAdd(SVotesRespond *pVotesRespond, const SyncRequestVoteReply *pMsg); void votesRespondReset(SVotesRespond *pVotesRespond, SyncTerm term); diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c index 1a5d418e7545122b48b15e1b83c4a2bef3d9a860..3afe7b15e213c0da3760c7a8ef1f313d145cd31f 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -89,7 +89,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { int32_t ret = 0; - char logBuf[128]; + char logBuf[128] = {0}; snprintf(logBuf, sizeof(logBuf), "==syncNodeOnAppendEntriesCb== term:%lu", ths->pRaftStore->currentTerm); syncAppendEntriesLog2(logBuf, pMsg); @@ -107,7 +107,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { SyncTerm localPreLogTerm = 0; if (pMsg->prevLogIndex >= SYNC_INDEX_BEGIN && pMsg->prevLogIndex <= ths->pLogStore->getLastIndex(ths->pLogStore)) { - SSyncRaftEntry* pEntry = logStoreGetEntry(ths->pLogStore, pMsg->prevLogIndex); + SSyncRaftEntry* pEntry = ths->pLogStore->getEntry(ths->pLogStore, pMsg->prevLogIndex); assert(pEntry != NULL); localPreLogTerm = pEntry->term; syncEntryDestory(pEntry); @@ -175,7 +175,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { bool conflict = false; SyncIndex extraIndex = pMsg->prevLogIndex + 1; - SSyncRaftEntry* pExtraEntry = logStoreGetEntry(ths->pLogStore, extraIndex); + SSyncRaftEntry* pExtraEntry = ths->pLogStore->getEntry(ths->pLogStore, extraIndex); assert(pExtraEntry != NULL); SSyncRaftEntry* pAppendEntry = syncEntryDeserialize(pMsg->data, pMsg->dataLen); @@ -197,7 +197,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { // notice! reverse roll back! 
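// Rolling back from delEnd down to delBegin hands the FSM its undo
// callbacks in the exact reverse of append order: state is unwound
// newest-first before the conflicting suffix of the local log is dropped
// in favor of the leader's entry -- Raft's usual log-repair step.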
for (SyncIndex index = delEnd; index >= delBegin; --index) { if (ths->pFsm->FpRollBackCb != NULL) { - SSyncRaftEntry* pRollBackEntry = logStoreGetEntry(ths->pLogStore, index); + SSyncRaftEntry* pRollBackEntry = ths->pLogStore->getEntry(ths->pLogStore, index); assert(pRollBackEntry != NULL); // if (pRollBackEntry->msgType != TDMT_VND_SYNC_NOOP) { @@ -324,7 +324,6 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { SRpcMsg rpcMsg; syncEntry2OriginalRpc(pEntry, &rpcMsg); - // if (ths->pFsm->FpCommitCb != NULL && pEntry->originalRpcType != TDMT_VND_SYNC_NOOP) { if (ths->pFsm->FpCommitCb != NULL && syncUtilUserCommit(pEntry->originalRpcType)) { SFsmCbMeta cbMeta; cbMeta.index = pEntry->index; @@ -332,20 +331,88 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { cbMeta.code = 0; cbMeta.state = ths->state; cbMeta.seqNum = pEntry->seqNum; - ths->pFsm->FpCommitCb(ths->pFsm, &rpcMsg, cbMeta); + cbMeta.term = pEntry->term; + cbMeta.currentTerm = ths->pRaftStore->currentTerm; + cbMeta.flag = 0x11; + + bool needExecute = true; + if (ths->pSnapshot != NULL && cbMeta.index <= ths->pSnapshot->lastApplyIndex) { + needExecute = false; + } + + if (needExecute) { + ths->pFsm->FpCommitCb(ths->pFsm, &rpcMsg, cbMeta); + } } // config change if (pEntry->originalRpcType == TDMT_VND_SYNC_CONFIG_CHANGE) { + SSyncCfg oldSyncCfg = ths->pRaftCfg->cfg; + SSyncCfg newSyncCfg; int32_t ret = syncCfgFromStr(rpcMsg.pCont, &newSyncCfg); ASSERT(ret == 0); - syncNodeUpdateConfig(ths, &newSyncCfg); - if (ths->state == TAOS_SYNC_STATE_LEADER) { - syncNodeBecomeLeader(ths); - } else { - syncNodeBecomeFollower(ths); + // update new config myIndex + bool hit = false; + for (int i = 0; i < newSyncCfg.replicaNum; ++i) { + if (strcmp(ths->myNodeInfo.nodeFqdn, (newSyncCfg.nodeInfo)[i].nodeFqdn) == 0 && + ths->myNodeInfo.nodePort == (newSyncCfg.nodeInfo)[i].nodePort) { + newSyncCfg.myIndex = i; + hit = true; + break; + } + } + + SReConfigCbMeta cbMeta = {0}; + bool isDrop; + + // I am in newConfig + if (hit) { + syncNodeUpdateConfig(ths, &newSyncCfg, &isDrop); + + // change isStandBy to normal + if (!isDrop) { + if (ths->state == TAOS_SYNC_STATE_LEADER) { + syncNodeBecomeLeader(ths); + } else { + syncNodeBecomeFollower(ths); + } + } + + char* sOld = syncCfg2Str(&oldSyncCfg); + char* sNew = syncCfg2Str(&newSyncCfg); + sInfo("==config change== 0x11 old:%s new:%s isDrop:%d \n", sOld, sNew, isDrop); + taosMemoryFree(sOld); + taosMemoryFree(sNew); + } + + // always call FpReConfigCb + if (ths->pFsm->FpReConfigCb != NULL) { + cbMeta.code = 0; + cbMeta.currentTerm = ths->pRaftStore->currentTerm; + cbMeta.index = pEntry->index; + cbMeta.term = pEntry->term; + cbMeta.oldCfg = oldSyncCfg; + cbMeta.flag = 0x11; + cbMeta.isDrop = isDrop; + ths->pFsm->FpReConfigCb(ths->pFsm, newSyncCfg, cbMeta); + } + } + + // restore finish + if (pEntry->index == ths->pLogStore->getLastIndex(ths->pLogStore)) { + if (ths->restoreFinish == false) { + if (ths->pFsm->FpRestoreFinishCb != NULL) { + ths->pFsm->FpRestoreFinishCb(ths->pFsm); + } + ths->restoreFinish = true; + sInfo("==syncNodeOnAppendEntriesCb== restoreFinish set true %p vgId:%d", ths, ths->vgId); + + /* + tsem_post(&ths->restoreSem); + sInfo("==syncNodeOnAppendEntriesCb== RestoreFinish tsem_post %p", ths); + */ } } diff --git a/source/libs/sync/src/syncAppendEntriesReply.c b/source/libs/sync/src/syncAppendEntriesReply.c index 77d85e29151205edd31deed1c40f5dbffca90849..4e6d870e194a223bd35d5671dc17532bd5e8626e 100644 --- 
a/source/libs/sync/src/syncAppendEntriesReply.c +++ b/source/libs/sync/src/syncAppendEntriesReply.c @@ -38,7 +38,7 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* pMsg) { int32_t ret = 0; - char logBuf[128]; + char logBuf[128] = {0}; snprintf(logBuf, sizeof(logBuf), "==syncNodeOnAppendEntriesReplyCb== term:%lu", ths->pRaftStore->currentTerm); syncAppendEntriesReplyLog2(logBuf, pMsg); @@ -57,7 +57,7 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* p // } if (pMsg->term > ths->pRaftStore->currentTerm) { - char logBuf[128]; + char logBuf[128] = {0}; snprintf(logBuf, sizeof(logBuf), "syncNodeOnAppendEntriesReplyCb error term, receive:%lu current:%lu", pMsg->term, ths->pRaftStore->currentTerm); syncNodeLog2(logBuf, ths); diff --git a/source/libs/sync/src/syncCommit.c b/source/libs/sync/src/syncCommit.c index 0f17cf267e8e8a54d6020f12b47bafb594c92434..4a1a40a2d7ddd47d9d6ec30a683f284dacc70fa7 100644 --- a/source/libs/sync/src/syncCommit.c +++ b/source/libs/sync/src/syncCommit.c @@ -102,7 +102,6 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) { SRpcMsg rpcMsg; syncEntry2OriginalRpc(pEntry, &rpcMsg); - // if (pSyncNode->pFsm->FpCommitCb != NULL && pEntry->originalRpcType != TDMT_VND_SYNC_NOOP) { if (pSyncNode->pFsm->FpCommitCb != NULL && syncUtilUserCommit(pEntry->originalRpcType)) { SFsmCbMeta cbMeta; cbMeta.index = pEntry->index; @@ -110,20 +109,87 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) { cbMeta.code = 0; cbMeta.state = pSyncNode->state; cbMeta.seqNum = pEntry->seqNum; - pSyncNode->pFsm->FpCommitCb(pSyncNode->pFsm, &rpcMsg, cbMeta); + cbMeta.term = pEntry->term; + cbMeta.currentTerm = pSyncNode->pRaftStore->currentTerm; + cbMeta.flag = 0x1; + + bool needExecute = true; + if (pSyncNode->pSnapshot != NULL && cbMeta.index <= pSyncNode->pSnapshot->lastApplyIndex) { + needExecute = false; + } + + if (needExecute) { + pSyncNode->pFsm->FpCommitCb(pSyncNode->pFsm, &rpcMsg, cbMeta); + } } // config change if (pEntry->originalRpcType == TDMT_VND_SYNC_CONFIG_CHANGE) { + SSyncCfg oldSyncCfg = pSyncNode->pRaftCfg->cfg; + SSyncCfg newSyncCfg; int32_t ret = syncCfgFromStr(rpcMsg.pCont, &newSyncCfg); ASSERT(ret == 0); - syncNodeUpdateConfig(pSyncNode, &newSyncCfg); + // update new config myIndex + bool hit = false; + for (int i = 0; i < newSyncCfg.replicaNum; ++i) { + if (strcmp(pSyncNode->myNodeInfo.nodeFqdn, (newSyncCfg.nodeInfo)[i].nodeFqdn) == 0 && + pSyncNode->myNodeInfo.nodePort == (newSyncCfg.nodeInfo)[i].nodePort) { + newSyncCfg.myIndex = i; + hit = true; + break; + } + } + if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) { - syncNodeBecomeLeader(pSyncNode); - } else { - syncNodeBecomeFollower(pSyncNode); + ASSERT(hit == true); + } + + bool isDrop; + syncNodeUpdateConfig(pSyncNode, &newSyncCfg, &isDrop); + + // change isStandBy to normal + if (!isDrop) { + if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) { + syncNodeBecomeLeader(pSyncNode); + } else { + syncNodeBecomeFollower(pSyncNode); + } + } + + char* sOld = syncCfg2Str(&oldSyncCfg); + char* sNew = syncCfg2Str(&newSyncCfg); + sInfo("==config change== 0x1 old:%s new:%s isDrop:%d \n", sOld, sNew, isDrop); + taosMemoryFree(sOld); + taosMemoryFree(sNew); + + if (pSyncNode->pFsm->FpReConfigCb != NULL) { + SReConfigCbMeta cbMeta = {0}; + cbMeta.code = 0; + cbMeta.currentTerm = pSyncNode->pRaftStore->currentTerm; + cbMeta.index = pEntry->index; + cbMeta.term = pEntry->term; + cbMeta.oldCfg = oldSyncCfg; + cbMeta.flag = 0x1; + cbMeta.isDrop = isDrop; + 
pSyncNode->pFsm->FpReConfigCb(pSyncNode->pFsm, newSyncCfg, cbMeta); + } + } + + // restore finish + if (pEntry->index == pSyncNode->pLogStore->getLastIndex(pSyncNode->pLogStore)) { + if (pSyncNode->restoreFinish == false) { + if (pSyncNode->pFsm->FpRestoreFinishCb != NULL) { + pSyncNode->pFsm->FpRestoreFinishCb(pSyncNode->pFsm); + } + pSyncNode->restoreFinish = true; + sInfo("==syncMaybeAdvanceCommitIndex== restoreFinish set true %p vgId:%d", pSyncNode, pSyncNode->vgId); + + /* + tsem_post(&pSyncNode->restoreSem); + sInfo("==syncMaybeAdvanceCommitIndex== RestoreFinish tsem_post %p", pSyncNode); + */ } } @@ -162,4 +228,4 @@ bool syncAgree(SSyncNode* pSyncNode, SyncIndex index) { } } return false; -} \ No newline at end of file +} diff --git a/source/libs/sync/src/syncIO.c b/source/libs/sync/src/syncIO.c index 39760c32e83eddc060aeb9669fb252eaca816e54..e30a39e6342c4b7df77ee9cfdbe4f29333e36c16 100644 --- a/source/libs/sync/src/syncIO.c +++ b/source/libs/sync/src/syncIO.c @@ -74,7 +74,7 @@ int32_t syncIOSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) { { syncUtilMsgNtoH(pMsg->pCont); - char logBuf[256]; + char logBuf[256] = {0}; snprintf(logBuf, sizeof(logBuf), "==syncIOSendMsg== %s:%d", pEpSet->eps[0].fqdn, pEpSet->eps[0].port); syncRpcMsgLog2(logBuf, pMsg); @@ -89,7 +89,7 @@ int32_t syncIOSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) { int32_t syncIOEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) { int32_t ret = 0; - char logBuf[128]; + char logBuf[128] = {0}; syncRpcMsgLog2((char *)"==syncIOEqMsg==", pMsg); SRpcMsg *pTemp; diff --git a/source/libs/sync/src/syncIndexMgr.c b/source/libs/sync/src/syncIndexMgr.c index 5809cedb9038758744d20b8e6ee2270bd0720e47..4d556d21dde7e56c2048cc314f86ad0a8949bc37 100644 --- a/source/libs/sync/src/syncIndexMgr.c +++ b/source/libs/sync/src/syncIndexMgr.c @@ -60,7 +60,9 @@ void syncIndexMgrSetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, return; } } - assert(0); + + // maybe config change + // assert(0); } SyncIndex syncIndexMgrGetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId) { @@ -74,7 +76,7 @@ SyncIndex syncIndexMgrGetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaf } cJSON *syncIndexMgr2Json(SSyncIndexMgr *pSyncIndexMgr) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON *pRoot = cJSON_CreateObject(); if (pSyncIndexMgr != NULL) { diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index d9ff60bbe22b573db34331e5aabbd04b06ff5616..620fc514c6960754762187445b167098ce4382d3 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -13,7 +13,6 @@ * along with this program. If not, see . 
*/ -#include #include "sync.h" #include "syncAppendEntries.h" #include "syncAppendEntriesReply.h" @@ -55,14 +54,17 @@ static void syncFreeNode(void* param); // --------------------------------- int32_t syncInit() { - int32_t ret; - tsNodeRefId = taosOpenRef(200, syncFreeNode); - if (tsNodeRefId < 0) { - sError("failed to init node ref"); - syncCleanUp(); - ret = -1; - } else { - ret = syncEnvStart(); + int32_t ret = 0; + + if (!syncEnvIsStart()) { + tsNodeRefId = taosOpenRef(200, syncFreeNode); + if (tsNodeRefId < 0) { + sError("failed to init node ref"); + syncCleanUp(); + ret = -1; + } else { + ret = syncEnvStart(); + } } return ret; @@ -98,6 +100,21 @@ void syncStart(int64_t rid) { if (pSyncNode == NULL) { return; } + + if (pSyncNode->pRaftCfg->isStandBy) { + syncNodeStartStandBy(pSyncNode); + } else { + syncNodeStart(pSyncNode); + } + + taosReleaseRef(tsNodeRefId, pSyncNode->rid); +} + +void syncStartNormal(int64_t rid) { + SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid); + if (pSyncNode == NULL) { + return; + } syncNodeStart(pSyncNode); taosReleaseRef(tsNodeRefId, pSyncNode->rid); @@ -124,9 +141,38 @@ void syncStop(int64_t rid) { taosRemoveRef(tsNodeRefId, rid); } +int32_t syncSetStandby(int64_t rid) { + SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid); + if (pSyncNode == NULL) { + return -1; + } + + if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) { + taosReleaseRef(tsNodeRefId, pSyncNode->rid); + return -1; + } + + // state change + pSyncNode->state = TAOS_SYNC_STATE_FOLLOWER; + syncNodeStopHeartbeatTimer(pSyncNode); + + // reset elect timer, long enough + int32_t electMS = TIMER_MAX_MS; + int32_t ret = syncNodeRestartElectTimer(pSyncNode, electMS); + ASSERT(ret == 0); + + pSyncNode->pRaftCfg->isStandBy = 1; + raftCfgPersist(pSyncNode->pRaftCfg); + + taosReleaseRef(tsNodeRefId, pSyncNode->rid); + return 0; +} + int32_t syncReconfig(int64_t rid, const SSyncCfg* pSyncCfg) { int32_t ret = 0; char* configChange = syncCfg2Str((SSyncCfg*)pSyncCfg); + sInfo("==syncReconfig== newconfig:%s", configChange); + SRpcMsg rpcMsg = {0}; rpcMsg.msgType = TDMT_VND_SYNC_CONFIG_CHANGE; rpcMsg.info.noResp = 1; @@ -155,6 +201,18 @@ ESyncState syncGetMyRole(int64_t rid) { return state; } +bool syncIsRestoreFinish(int64_t rid) { + SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid); + if (pSyncNode == NULL) { + return false; + } + assert(rid == pSyncNode->rid); + bool b = pSyncNode->restoreFinish; + + taosReleaseRef(tsNodeRefId, pSyncNode->rid); + return b; +} + const char* syncGetMyRoleStr(int64_t rid) { const char* s = syncUtilState2String(syncGetMyRole(rid)); return s; @@ -240,7 +298,7 @@ int32_t syncGetAndDelRespRpc(int64_t rid, uint64_t index, SRpcMsg* msg) { return ret; } -void syncSetMsgCb(int64_t rid, const SMsgCb *msgcb) { +void syncSetMsgCb(int64_t rid, const SMsgCb* msgcb) { SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid); if (pSyncNode == NULL) { sTrace("syncSetQ get pSyncNode is NULL, rid:%ld", rid); @@ -304,10 +362,9 @@ int32_t syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak) { sTrace("syncPropose msgType:%d ", pMsg->msgType); int32_t ret = TAOS_SYNC_PROPOSE_SUCCESS; - SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid); - if (pSyncNode == NULL) { - return TAOS_SYNC_PROPOSE_OTHER_ERROR; - } + SSyncNode* pSyncNode = taosAcquireRef(tsNodeRefId, rid); + if (pSyncNode == NULL) return TAOS_SYNC_PROPOSE_OTHER_ERROR; + assert(rid == pSyncNode->rid); if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) { @@ 
-319,14 +376,13 @@ int32_t syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak) { SyncClientRequest* pSyncMsg = syncClientRequestBuild2(pMsg, seqNum, isWeak, pSyncNode->vgId); SRpcMsg rpcMsg; syncClientRequest2RpcMsg(pSyncMsg, &rpcMsg); - if (pSyncNode->FpEqMsg != NULL) { - pSyncNode->FpEqMsg(pSyncNode->msgcb, &rpcMsg); + + if (pSyncNode->FpEqMsg != NULL && (*pSyncNode->FpEqMsg)(pSyncNode->msgcb, &rpcMsg) == 0) { + ret = TAOS_SYNC_PROPOSE_SUCCESS; } else { sTrace("syncPropose pSyncNode->FpEqMsg is NULL"); } syncClientRequestDestroy(pSyncMsg); - ret = TAOS_SYNC_PROPOSE_SUCCESS; - } else { sTrace("syncPropose not leader, %s", syncUtilState2String(pSyncNode->state)); ret = TAOS_SYNC_PROPOSE_NOT_LEADER; @@ -337,7 +393,9 @@ } // open/close -------------- -SSyncNode* syncNodeOpen(const SSyncInfo* pSyncInfo) { +SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) { + SSyncInfo* pSyncInfo = (SSyncInfo*)pOldSyncInfo; + SSyncNode* pSyncNode = (SSyncNode*)taosMemoryMalloc(sizeof(SSyncNode)); assert(pSyncNode != NULL); memset(pSyncNode, 0, sizeof(SSyncNode)); @@ -349,11 +407,25 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pSyncInfo) { sError("failed to create dir:%s since %s", pSyncInfo->path, terrstr()); return NULL; } + } + snprintf(pSyncNode->configPath, sizeof(pSyncNode->configPath), "%s/raft_config.json", pSyncInfo->path); + if (!taosCheckExistFile(pSyncNode->configPath)) { // create raft config file - snprintf(pSyncNode->configPath, sizeof(pSyncNode->configPath), "%s/raft_config.json", pSyncInfo->path); - ret = syncCfgCreateFile((SSyncCfg*)&(pSyncInfo->syncCfg), pSyncNode->configPath); + ret = raftCfgCreateFile((SSyncCfg*)&(pSyncInfo->syncCfg), pSyncInfo->isStandBy, pSyncNode->configPath); assert(ret == 0); + + } else { + // update syncCfg by raft_config.json + pSyncNode->pRaftCfg = raftCfgOpen(pSyncNode->configPath); + assert(pSyncNode->pRaftCfg != NULL); + pSyncInfo->syncCfg = pSyncNode->pRaftCfg->cfg; + + char* serialized = raftCfg2Str(pSyncNode->pRaftCfg); + sInfo("syncNodeOpen update config:%s", serialized); + taosMemoryFree(serialized); + + raftCfgClose(pSyncNode->pRaftCfg); } // init by SSyncInfo @@ -490,6 +562,15 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pSyncInfo) { pSyncNode->pSyncRespMgr = syncRespMgrCreate(NULL, 0); assert(pSyncNode->pSyncRespMgr != NULL); + // restore state + pSyncNode->restoreFinish = false; + pSyncNode->pSnapshot = NULL; + if (pSyncNode->pFsm->FpGetSnapshot != NULL) { + pSyncNode->pSnapshot = taosMemoryMalloc(sizeof(SSnapshot)); + pSyncNode->pFsm->FpGetSnapshot(pSyncNode->pFsm, pSyncNode->pSnapshot); + } + // tsem_init(&(pSyncNode->restoreSem), 0, 0); + // start in syncNodeStart // start raft // syncNodeBecomeFollower(pSyncNode); @@ -509,6 +590,20 @@ void syncNodeStart(SSyncNode* pSyncNode) { // use this now syncNodeAppendNoop(pSyncNode); syncMaybeAdvanceCommitIndex(pSyncNode); // maybe only one replica + + /* + sInfo("==syncNodeStart== RestoreFinish begin 1 replica tsem_wait %p", pSyncNode); + tsem_wait(&pSyncNode->restoreSem); + sInfo("==syncNodeStart== RestoreFinish end 1 replica tsem_wait %p", pSyncNode); + */ + + /* + while (pSyncNode->restoreFinish != true) { + taosMsleep(10); + } + */ + + sInfo("==syncNodeStart== restoreFinish ok 1 replica %p vgId:%d", pSyncNode, pSyncNode->vgId); return; } @@ -518,6 +613,19 @@ void syncNodeStart(SSyncNode* pSyncNode) { int32_t ret = 0; // ret = syncNodeStartPingTimer(pSyncNode); assert(ret == 0); + + /* + sInfo("==syncNodeStart== RestoreFinish
begin multi replica tsem_wait %p", pSyncNode); + tsem_wait(&pSyncNode->restoreSem); + sInfo("==syncNodeStart== RestoreFinish end multi replica tsem_wait %p", pSyncNode); + */ + + /* + while (pSyncNode->restoreFinish != true) { + taosMsleep(10); + } + */ + sInfo("==syncNodeStart== restoreFinish ok multi replica %p vgId:%d", pSyncNode, pSyncNode->vgId); } void syncNodeStartStandBy(SSyncNode* pSyncNode) { @@ -554,6 +662,12 @@ void syncNodeClose(SSyncNode* pSyncNode) { taosMemoryFree(pSyncNode->pFsm); } + if (pSyncNode->pSnapshot != NULL) { + taosMemoryFree(pSyncNode->pSnapshot); + } + + // tsem_destroy(&pSyncNode->restoreSem); + // free memory in syncFreeNode // taosMemoryFree(pSyncNode); } @@ -701,7 +815,7 @@ int32_t syncNodeSendMsgByInfo(const SNodeInfo* nodeInfo, SSyncNode* pSyncNode, S } cJSON* syncNode2Json(const SSyncNode* pSyncNode) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pSyncNode != NULL) { @@ -856,19 +970,20 @@ char* syncNode2SimpleStr(const SSyncNode* pSyncNode) { int len = 256; char* s = (char*)taosMemoryMalloc(len); snprintf(s, len, - "syncNode2SimpleStr vgId:%d currentTerm:%lu, commitIndex:%ld, state:%d %s, electTimerLogicClock:%lu, " + "syncNode2SimpleStr vgId:%d currentTerm:%lu, commitIndex:%ld, state:%d %s, isStandBy:%d, " + "electTimerLogicClock:%lu, " "electTimerLogicClockUser:%lu, " "electTimerMS:%d", pSyncNode->vgId, pSyncNode->pRaftStore->currentTerm, pSyncNode->commitIndex, pSyncNode->state, - syncUtilState2String(pSyncNode->state), pSyncNode->electTimerLogicClock, pSyncNode->electTimerLogicClockUser, - pSyncNode->electTimerMS); + syncUtilState2String(pSyncNode->state), pSyncNode->pRaftCfg->isStandBy, pSyncNode->electTimerLogicClock, + pSyncNode->electTimerLogicClockUser, pSyncNode->electTimerMS); return s; } -void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* newConfig) { +void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* newConfig, bool* isDrop) { + SSyncCfg oldConfig = pSyncNode->pRaftCfg->cfg; pSyncNode->pRaftCfg->cfg = *newConfig; - int32_t ret = raftCfgPersist(pSyncNode->pRaftCfg); - ASSERT(ret == 0); + int32_t ret = 0; // init internal pSyncNode->myNodeInfo = pSyncNode->pRaftCfg->cfg.nodeInfo[pSyncNode->pRaftCfg->cfg.myIndex]; @@ -895,7 +1010,34 @@ void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* newConfig) { syncIndexMgrUpdate(pSyncNode->pNextIndex, pSyncNode); syncIndexMgrUpdate(pSyncNode->pMatchIndex, pSyncNode); + voteGrantedUpdate(pSyncNode->pVotesGranted, pSyncNode); + votesRespondUpdate(pSyncNode->pVotesRespond, pSyncNode); + + // isDrop + *isDrop = true; + bool IamInOld, IamInNew; + for (int i = 0; i < oldConfig.replicaNum; ++i) { + if (strcmp((oldConfig.nodeInfo)[i].nodeFqdn, pSyncNode->myNodeInfo.nodeFqdn) == 0 && + (oldConfig.nodeInfo)[i].nodePort == pSyncNode->myNodeInfo.nodePort) { + *isDrop = false; + break; + } + } + + for (int i = 0; i < newConfig->replicaNum; ++i) { + if (strcmp((newConfig->nodeInfo)[i].nodeFqdn, pSyncNode->myNodeInfo.nodeFqdn) == 0 && + (newConfig->nodeInfo)[i].nodePort == pSyncNode->myNodeInfo.nodePort) { + *isDrop = false; + break; + } + } + + if (!(*isDrop)) { + // change isStandBy to normal + pSyncNode->pRaftCfg->isStandBy = 0; + } + raftCfgPersist(pSyncNode->pRaftCfg); syncNodeLog2("==syncNodeUpdateConfig==", pSyncNode); } @@ -1196,7 +1338,7 @@ static int32_t syncNodeAppendNoop(SSyncNode* ths) { // on message ---- int32_t syncNodeOnPingCb(SSyncNode* ths, SyncPing* pMsg) { // log state - char logBuf[1024]; + char logBuf[1024] = {0}; snprintf(logBuf, 
sizeof(logBuf), "==syncNodeOnPingCb== vgId:%d, state: %d, %s, term:%lu electTimerLogicClock:%lu, " "electTimerLogicClockUser:%lu, electTimerMS:%d", diff --git a/source/libs/sync/src/syncMessage.c b/source/libs/sync/src/syncMessage.c index efefcbb3e7b217f7dd9781eac32659f2183bf5c1..fae069f2e6b13c0073c6309f889dc7f8f92c8c6e 100644 --- a/source/libs/sync/src/syncMessage.c +++ b/source/libs/sync/src/syncMessage.c @@ -210,11 +210,12 @@ void syncTimeoutFromRpcMsg(const SRpcMsg* pRpcMsg, SyncTimeout* pMsg) { SyncTimeout* syncTimeoutFromRpcMsg2(const SRpcMsg* pRpcMsg) { SyncTimeout* pMsg = syncTimeoutDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen); + assert(pMsg != NULL); return pMsg; } cJSON* syncTimeout2Json(const SyncTimeout* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -411,7 +412,7 @@ SyncPing* syncPingDeserialize3(void* buf, int32_t bufLen) { } uint32_t len; char* data = NULL; - if (tDecodeBinary(&decoder, (const uint8_t**)(&data), &len) < 0) { + if (tDecodeBinary(&decoder, (uint8_t**)(&data), &len) < 0) { return NULL; } assert(len = pMsg->dataLen); @@ -436,11 +437,12 @@ void syncPingFromRpcMsg(const SRpcMsg* pRpcMsg, SyncPing* pMsg) { SyncPing* syncPingFromRpcMsg2(const SRpcMsg* pRpcMsg) { SyncPing* pMsg = syncPingDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen); + assert(pMsg != NULL); return pMsg; } cJSON* syncPing2Json(const SyncPing* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -454,7 +456,7 @@ cJSON* syncPing2Json(const SyncPing* pMsg) { { uint64_t u64 = pMsg->srcId.addr; cJSON* pTmp = pSrcId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -469,7 +471,7 @@ cJSON* syncPing2Json(const SyncPing* pMsg) { { uint64_t u64 = pMsg->destId.addr; cJSON* pTmp = pDestId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -670,7 +672,7 @@ SyncPingReply* syncPingReplyDeserialize3(void* buf, int32_t bufLen) { } uint32_t len; char* data = NULL; - if (tDecodeBinary(&decoder, (const uint8_t**)(&data), &len) < 0) { + if (tDecodeBinary(&decoder, (uint8_t**)(&data), &len) < 0) { return NULL; } assert(len = pMsg->dataLen); @@ -695,11 +697,12 @@ void syncPingReplyFromRpcMsg(const SRpcMsg* pRpcMsg, SyncPingReply* pMsg) { SyncPingReply* syncPingReplyFromRpcMsg2(const SRpcMsg* pRpcMsg) { SyncPingReply* pMsg = syncPingReplyDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen); + assert(pMsg != NULL); return pMsg; } cJSON* syncPingReply2Json(const SyncPingReply* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -713,7 +716,7 @@ cJSON* syncPingReply2Json(const SyncPingReply* pMsg) { { uint64_t u64 = pMsg->srcId.addr; cJSON* pTmp = pSrcId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -728,7 +731,7 @@ cJSON* syncPingReply2Json(const SyncPingReply* pMsg) { { uint64_t u64 = pMsg->destId.addr; cJSON* pTmp = pDestId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -861,11 +864,12 @@ void syncClientRequestFromRpcMsg(const SRpcMsg* pRpcMsg, SyncClientRequest* pMsg // step 3. 
RpcMsg => SyncClientRequest, from queue SyncClientRequest* syncClientRequestFromRpcMsg2(const SRpcMsg* pRpcMsg) { SyncClientRequest* pMsg = syncClientRequestDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen); + assert(pMsg != NULL); return pMsg; } cJSON* syncClientRequest2Json(const SyncClientRequest* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -986,11 +990,12 @@ void syncRequestVoteFromRpcMsg(const SRpcMsg* pRpcMsg, SyncRequestVote* pMsg) { SyncRequestVote* syncRequestVoteFromRpcMsg2(const SRpcMsg* pRpcMsg) { SyncRequestVote* pMsg = syncRequestVoteDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen); + assert(pMsg != NULL); return pMsg; } cJSON* syncRequestVote2Json(const SyncRequestVote* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -1004,7 +1009,7 @@ cJSON* syncRequestVote2Json(const SyncRequestVote* pMsg) { { uint64_t u64 = pMsg->srcId.addr; cJSON* pTmp = pSrcId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1018,7 +1023,7 @@ cJSON* syncRequestVote2Json(const SyncRequestVote* pMsg) { { uint64_t u64 = pMsg->destId.addr; cJSON* pTmp = pDestId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1134,11 +1139,12 @@ void syncRequestVoteReplyFromRpcMsg(const SRpcMsg* pRpcMsg, SyncRequestVoteReply SyncRequestVoteReply* syncRequestVoteReplyFromRpcMsg2(const SRpcMsg* pRpcMsg) { SyncRequestVoteReply* pMsg = syncRequestVoteReplyDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen); + assert(pMsg != NULL); return pMsg; } cJSON* syncRequestVoteReply2Json(const SyncRequestVoteReply* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -1152,7 +1158,7 @@ cJSON* syncRequestVoteReply2Json(const SyncRequestVoteReply* pMsg) { { uint64_t u64 = pMsg->srcId.addr; cJSON* pTmp = pSrcId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1166,7 +1172,7 @@ cJSON* syncRequestVoteReply2Json(const SyncRequestVoteReply* pMsg) { { uint64_t u64 = pMsg->destId.addr; cJSON* pTmp = pDestId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1281,11 +1287,12 @@ void syncAppendEntriesFromRpcMsg(const SRpcMsg* pRpcMsg, SyncAppendEntries* pMsg SyncAppendEntries* syncAppendEntriesFromRpcMsg2(const SRpcMsg* pRpcMsg) { SyncAppendEntries* pMsg = syncAppendEntriesDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen); + assert(pMsg != NULL); return pMsg; } cJSON* syncAppendEntries2Json(const SyncAppendEntries* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -1299,7 +1306,7 @@ cJSON* syncAppendEntries2Json(const SyncAppendEntries* pMsg) { { uint64_t u64 = pMsg->srcId.addr; cJSON* pTmp = pSrcId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1314,7 +1321,7 @@ cJSON* syncAppendEntries2Json(const SyncAppendEntries* pMsg) { { uint64_t u64 = pMsg->destId.addr; cJSON* pTmp = pDestId; - char host[128]; + char host[128] = {0}; uint16_t port; 
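/*
 * The "char buf[N] = {0};" rewrite applied throughout these hunks is the
 * same fix everywhere: the buffers are passed to helpers that may fill only
 * a prefix, and a later "%s" read would otherwise touch indeterminate stack
 * bytes. Zero-initialization guarantees NUL termination no matter how much
 * the callee writes, e.g.:
 *
 *   char host[128] = {0};      // every byte defined before any use
 *   memcpy(host, "fqdn", 4);   // callee writes no terminating NUL
 *   printf("%s\n", host);      // still well-defined: the tail is all NULs
 */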
syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1444,11 +1451,12 @@ void syncAppendEntriesReplyFromRpcMsg(const SRpcMsg* pRpcMsg, SyncAppendEntriesR SyncAppendEntriesReply* syncAppendEntriesReplyFromRpcMsg2(const SRpcMsg* pRpcMsg) { SyncAppendEntriesReply* pMsg = syncAppendEntriesReplyDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen); + assert(pMsg != NULL); return pMsg; } cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -1462,7 +1470,7 @@ cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) { { uint64_t u64 = pMsg->srcId.addr; cJSON* pTmp = pSrcId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1477,7 +1485,7 @@ cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) { { uint64_t u64 = pMsg->destId.addr; cJSON* pTmp = pDestId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1616,7 +1624,7 @@ void syncApplyMsg2OriginalRpcMsg(const SyncApplyMsg* pMsg, SRpcMsg* pOriginalRpc } cJSON* syncApplyMsg2Json(const SyncApplyMsg* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { diff --git a/source/libs/sync/src/syncRaftCfg.c b/source/libs/sync/src/syncRaftCfg.c index dc540424ec48ae1489a48f27c8bcbc168e09f83a..3e1931e2c37e626b7ab049299a9b83b8a78a2cf1 100644 --- a/source/libs/sync/src/syncRaftCfg.c +++ b/source/libs/sync/src/syncRaftCfg.c @@ -28,11 +28,11 @@ SRaftCfg *raftCfgOpen(const char *path) { taosLSeekFile(pCfg->pFile, 0, SEEK_SET); - char buf[1024]; + char buf[1024] = {0}; int len = taosReadFile(pCfg->pFile, buf, sizeof(buf)); assert(len > 0); - int32_t ret = syncCfgFromStr(buf, &(pCfg->cfg)); + int32_t ret = raftCfgFromStr(buf, pCfg); assert(ret == 0); return pCfg; @@ -48,18 +48,26 @@ int32_t raftCfgClose(SRaftCfg *pRaftCfg) { int32_t raftCfgPersist(SRaftCfg *pRaftCfg) { assert(pRaftCfg != NULL); - char *s = syncCfg2Str(&(pRaftCfg->cfg)); + char *s = raftCfg2Str(pRaftCfg); taosLSeekFile(pRaftCfg->pFile, 0, SEEK_SET); - int64_t ret = taosWriteFile(pRaftCfg->pFile, s, strlen(s) + 1); - assert(ret == strlen(s) + 1); - taosMemoryFree(s); + char buf[CONFIG_FILE_LEN] = {0}; + memset(buf, 0, sizeof(buf)); + ASSERT(strlen(s) + 1 <= CONFIG_FILE_LEN); + snprintf(buf, sizeof(buf), "%s", s); + int64_t ret = taosWriteFile(pRaftCfg->pFile, buf, sizeof(buf)); + assert(ret == sizeof(buf)); + + // int64_t ret = taosWriteFile(pRaftCfg->pFile, s, strlen(s) + 1); + // assert(ret == strlen(s) + 1); + + taosMemoryFree(s); taosFsyncFile(pRaftCfg->pFile); return 0; } cJSON *syncCfg2Json(SSyncCfg *pSyncCfg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON *pRoot = cJSON_CreateObject(); if (pSyncCfg != NULL) { @@ -76,9 +84,12 @@ cJSON *syncCfg2Json(SSyncCfg *pSyncCfg) { } } + return pRoot; + /* cJSON *pJson = cJSON_CreateObject(); cJSON_AddItemToObject(pJson, "SSyncCfg", pRoot); return pJson; + */ } char *syncCfg2Str(SSyncCfg *pSyncCfg) { @@ -90,7 +101,8 @@ char *syncCfg2Str(SSyncCfg *pSyncCfg) { int32_t syncCfgFromJson(const cJSON *pRoot, SSyncCfg *pSyncCfg) { memset(pSyncCfg, 0, sizeof(SSyncCfg)); - cJSON *pJson = cJSON_GetObjectItem(pRoot, "SSyncCfg"); + // cJSON *pJson = cJSON_GetObjectItem(pRoot, "SSyncCfg"); + 
const cJSON *pJson = pRoot; cJSON *pReplicaNum = cJSON_GetObjectItem(pJson, "replicaNum"); assert(cJSON_IsNumber(pReplicaNum)); @@ -133,30 +145,73 @@ int32_t syncCfgFromStr(const char *s, SSyncCfg *pSyncCfg) { } cJSON *raftCfg2Json(SRaftCfg *pRaftCfg) { - cJSON *pJson = syncCfg2Json(&(pRaftCfg->cfg)); + cJSON *pRoot = cJSON_CreateObject(); + cJSON_AddItemToObject(pRoot, "SSyncCfg", syncCfg2Json(&(pRaftCfg->cfg))); + cJSON_AddNumberToObject(pRoot, "isStandBy", pRaftCfg->isStandBy); + + cJSON *pJson = cJSON_CreateObject(); + cJSON_AddItemToObject(pJson, "RaftCfg", pRoot); return pJson; } char *raftCfg2Str(SRaftCfg *pRaftCfg) { - char *s = syncCfg2Str(&(pRaftCfg->cfg)); - return s; + cJSON *pJson = raftCfg2Json(pRaftCfg); + char * serialized = cJSON_Print(pJson); + cJSON_Delete(pJson); + return serialized; } -int32_t syncCfgCreateFile(SSyncCfg *pCfg, const char *path) { +int32_t raftCfgCreateFile(SSyncCfg *pCfg, int8_t isStandBy, const char *path) { assert(pCfg != NULL); TdFilePtr pFile = taosOpenFile(path, TD_FILE_CREATE | TD_FILE_WRITE); assert(pFile != NULL); - char * s = syncCfg2Str(pCfg); - int64_t ret = taosWriteFile(pFile, s, strlen(s) + 1); - assert(ret == strlen(s) + 1); + SRaftCfg raftCfg; + raftCfg.cfg = *pCfg; + raftCfg.isStandBy = isStandBy; + char *s = raftCfg2Str(&raftCfg); + + char buf[CONFIG_FILE_LEN] = {0}; + memset(buf, 0, sizeof(buf)); + ASSERT(strlen(s) + 1 <= CONFIG_FILE_LEN); + snprintf(buf, sizeof(buf), "%s", s); + int64_t ret = taosWriteFile(pFile, buf, sizeof(buf)); + assert(ret == sizeof(buf)); + + // int64_t ret = taosWriteFile(pFile, s, strlen(s) + 1); + // assert(ret == strlen(s) + 1); taosMemoryFree(s); taosCloseFile(&pFile); return 0; } +int32_t raftCfgFromJson(const cJSON *pRoot, SRaftCfg *pRaftCfg) { + // memset(pRaftCfg, 0, sizeof(SRaftCfg)); + cJSON *pJson = cJSON_GetObjectItem(pRoot, "RaftCfg"); + + cJSON *pJsonIsStandBy = cJSON_GetObjectItem(pJson, "isStandBy"); + pRaftCfg->isStandBy = cJSON_GetNumberValue(pJsonIsStandBy); + + cJSON * pJsonSyncCfg = cJSON_GetObjectItem(pJson, "SSyncCfg"); + int32_t code = syncCfgFromJson(pJsonSyncCfg, &(pRaftCfg->cfg)); + ASSERT(code == 0); + + return code; +} + +int32_t raftCfgFromStr(const char *s, SRaftCfg *pRaftCfg) { + cJSON *pRoot = cJSON_Parse(s); + assert(pRoot != NULL); + + int32_t ret = raftCfgFromJson(pRoot, pRaftCfg); + assert(ret == 0); + + cJSON_Delete(pRoot); + return 0; +} + // for debug ---------------------- void syncCfgPrint(SSyncCfg *pCfg) { char *serialized = syncCfg2Str(pCfg); diff --git a/source/libs/sync/src/syncRaftEntry.c b/source/libs/sync/src/syncRaftEntry.c index 21ee35eaf9c276636d754048095d6b2d44f18796..8755f71654382f3913a3c81b6ee1e9b6e91dbb69 100644 --- a/source/libs/sync/src/syncRaftEntry.c +++ b/source/libs/sync/src/syncRaftEntry.c @@ -107,7 +107,7 @@ SSyncRaftEntry* syncEntryDeserialize(const char* buf, uint32_t len) { } cJSON* syncEntry2Json(const SSyncRaftEntry* pEntry) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pEntry != NULL) { diff --git a/source/libs/sync/src/syncRaftLog.c b/source/libs/sync/src/syncRaftLog.c index 07a9397a580332f427ab3b206359de3ec0accf40..a6397f8cba24694d6f36847af5e877c72bd1a920 100644 --- a/source/libs/sync/src/syncRaftLog.c +++ b/source/libs/sync/src/syncRaftLog.c @@ -16,6 +16,15 @@ #include "syncRaftLog.h" #include "wal.h" +static SSyncRaftEntry* logStoreGetLastEntry(SSyncLogStore* pLogStore); +static SyncIndex logStoreLastIndex(SSyncLogStore* pLogStore); +static SyncTerm logStoreLastTerm(SSyncLogStore* pLogStore); 
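/*
 * Taken together, raftCfg2Json()/raftCfgFromJson() in the syncRaftCfg.c hunk
 * above define the new on-disk config shape: the old SSyncCfg object is
 * nested under a "RaftCfg" wrapper that also carries isStandBy, and
 * raftCfgPersist() snprintf's the string into a zeroed CONFIG_FILE_LEN
 * (1024-byte) block so a shorter config cleanly overwrites a longer
 * predecessor. A self-contained round-trip sketch with plain cJSON -- the
 * SSyncCfg body is reduced to replicaNum here; the real code also writes
 * myIndex and the nodeInfo array:
 */
#include <stdio.h>
#include <stdlib.h>
#include "cJSON.h"

int main(void) {
  /* serialize, mirroring raftCfg2Json() */
  cJSON *pCfg = cJSON_CreateObject();
  cJSON_AddNumberToObject(pCfg, "replicaNum", 3);
  cJSON *pRaftCfg = cJSON_CreateObject();
  cJSON_AddItemToObject(pRaftCfg, "SSyncCfg", pCfg); /* takes ownership */
  cJSON_AddNumberToObject(pRaftCfg, "isStandBy", 1);
  cJSON *pJson = cJSON_CreateObject();
  cJSON_AddItemToObject(pJson, "RaftCfg", pRaftCfg);
  char *s = cJSON_Print(pJson);
  cJSON_Delete(pJson);

  /* parse back, mirroring raftCfgFromStr() -> raftCfgFromJson() */
  cJSON *pRoot = cJSON_Parse(s);
  cJSON *pWrapped = cJSON_GetObjectItem(pRoot, "RaftCfg");
  int isStandBy =
      (int)cJSON_GetNumberValue(cJSON_GetObjectItem(pWrapped, "isStandBy"));
  printf("%s\nisStandBy=%d\n", s, isStandBy);
  cJSON_Delete(pRoot);
  free(s); /* cJSON_Print allocates with malloc by default */
  return 0;
}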
+static SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index); +static int32_t logStoreAppendEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry); +static int32_t logStoreTruncate(SSyncLogStore* pLogStore, SyncIndex fromIndex); +static int32_t logStoreUpdateCommitIndex(SSyncLogStore* pLogStore, SyncIndex index); +static SyncIndex logStoreGetCommitIndex(SSyncLogStore* pLogStore); + SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode) { SSyncLogStore* pLogStore = taosMemoryMalloc(sizeof(SSyncLogStore)); assert(pLogStore != NULL); @@ -78,7 +87,9 @@ SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index) { if (index >= SYNC_INDEX_BEGIN && index <= logStoreLastIndex(pLogStore)) { SWalReadHandle* pWalHandle = walOpenReadHandle(pWal); - int32_t code = walReadWithHandle(pWalHandle, index); + ASSERT(pWalHandle != NULL); + + int32_t code = walReadWithHandle(pWalHandle, index); if (code != 0) { int32_t err = terrno; const char* errStr = tstrerror(err); @@ -179,7 +190,7 @@ SSyncRaftEntry* logStoreGetLastEntry(SSyncLogStore* pLogStore) { } cJSON* logStore2Json(SSyncLogStore* pLogStore) { - char u64buf[128]; + char u64buf[128] = {0}; SSyncLogStoreData* pData = (SSyncLogStoreData*)pLogStore->data; cJSON* pRoot = cJSON_CreateObject(); @@ -216,7 +227,7 @@ char* logStore2Str(SSyncLogStore* pLogStore) { } cJSON* logStoreSimple2Json(SSyncLogStore* pLogStore) { - char u64buf[128]; + char u64buf[128] = {0}; SSyncLogStoreData* pData = (SSyncLogStoreData*)pLogStore->data; cJSON* pRoot = cJSON_CreateObject(); diff --git a/source/libs/sync/src/syncRaftStore.c b/source/libs/sync/src/syncRaftStore.c index d6f2e91de7739efd535a23427168180fe2aabc86..52e815292607d69e7d364f6a11c31c184f07914a 100644 --- a/source/libs/sync/src/syncRaftStore.c +++ b/source/libs/sync/src/syncRaftStore.c @@ -34,7 +34,7 @@ SRaftStore *raftStoreOpen(const char *path) { memset(pRaftStore, 0, sizeof(*pRaftStore)); snprintf(pRaftStore->path, sizeof(pRaftStore->path), "%s", path); - char storeBuf[RAFT_STORE_BLOCK_SIZE]; + char storeBuf[RAFT_STORE_BLOCK_SIZE] = {0}; memset(storeBuf, 0, sizeof(storeBuf)); if (!raftStoreFileExist(pRaftStore->path)) { @@ -84,7 +84,7 @@ int32_t raftStorePersist(SRaftStore *pRaftStore) { assert(pRaftStore != NULL); int32_t ret; - char storeBuf[RAFT_STORE_BLOCK_SIZE]; + char storeBuf[RAFT_STORE_BLOCK_SIZE] = {0}; ret = raftStoreSerialize(pRaftStore, storeBuf, sizeof(storeBuf)); assert(ret == 0); @@ -107,7 +107,7 @@ int32_t raftStoreSerialize(SRaftStore *pRaftStore, char *buf, size_t len) { cJSON *pRoot = cJSON_CreateObject(); - char u64Buf[128]; + char u64Buf[128] = {0}; snprintf(u64Buf, sizeof(u64Buf), "%lu", pRaftStore->currentTerm); cJSON_AddStringToObject(pRoot, "current_term", u64Buf); @@ -117,7 +117,7 @@ int32_t raftStoreSerialize(SRaftStore *pRaftStore, char *buf, size_t len) { cJSON_AddNumberToObject(pRoot, "vote_for_vgid", pRaftStore->voteFor.vgId); uint64_t u64 = pRaftStore->voteFor.addr; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pRoot, "addr_host", host); @@ -184,7 +184,7 @@ void raftStoreSetTerm(SRaftStore *pRaftStore, SyncTerm term) { int32_t raftStoreFromJson(SRaftStore *pRaftStore, cJSON *pJson) { return 0; } cJSON *raftStore2Json(SRaftStore *pRaftStore) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON *pRoot = cJSON_CreateObject(); if (pRaftStore != NULL) { @@ -196,7 +196,7 @@ cJSON *raftStore2Json(SRaftStore *pRaftStore) { cJSON_AddStringToObject(pVoteFor, "addr", 
u64buf); { uint64_t u64 = pRaftStore->voteFor.addr; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pVoteFor, "addr_host", host); diff --git a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c index 2fdb8a0e177f0f985c40b136ea29ce9f968c0fad..d17e64d936737ba7ea0dc5f33db407cfdf4bf205 100644 --- a/source/libs/sync/src/syncReplication.c +++ b/source/libs/sync/src/syncReplication.c @@ -75,7 +75,7 @@ int32_t syncNodeAppendEntriesPeers(SSyncNode* pSyncNode) { // SyncIndex lastIndex = syncUtilMinIndex(pSyncNode->pLogStore->getLastIndex(pSyncNode->pLogStore), nextIndex); SyncAppendEntries* pMsg = NULL; - SSyncRaftEntry* pEntry = logStoreGetEntry(pSyncNode->pLogStore, nextIndex); + SSyncRaftEntry* pEntry = pSyncNode->pLogStore->getEntry(pSyncNode->pLogStore, nextIndex); if (pEntry != NULL) { pMsg = syncAppendEntriesBuild(pEntry->bytes, pSyncNode->vgId); assert(pMsg != NULL); diff --git a/source/libs/sync/src/syncRequestVote.c b/source/libs/sync/src/syncRequestVote.c index 619a1546a96ad9642272b7227466d99be833be9f..265677129213c6887012ee72da9066aad25adc09 100644 --- a/source/libs/sync/src/syncRequestVote.c +++ b/source/libs/sync/src/syncRequestVote.c @@ -44,7 +44,7 @@ int32_t syncNodeOnRequestVoteCb(SSyncNode* ths, SyncRequestVote* pMsg) { int32_t ret = 0; - char logBuf[128]; + char logBuf[128] = {0}; snprintf(logBuf, sizeof(logBuf), "==syncNodeOnRequestVoteCb== term:%lu", ths->pRaftStore->currentTerm); syncRequestVoteLog2(logBuf, pMsg); diff --git a/source/libs/sync/src/syncRequestVoteReply.c b/source/libs/sync/src/syncRequestVoteReply.c index a6348dff50132f860ada45e9cc3bddfabd6d62d0..75236aee2bcec1ca9c7ae07165c427edbc1e0a04 100644 --- a/source/libs/sync/src/syncRequestVoteReply.c +++ b/source/libs/sync/src/syncRequestVoteReply.c @@ -39,7 +39,7 @@ int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg) { int32_t ret = 0; - char logBuf[128]; + char logBuf[128] = {0}; snprintf(logBuf, sizeof(logBuf), "==syncNodeOnRequestVoteReplyCb== term:%lu", ths->pRaftStore->currentTerm); syncRequestVoteReplyLog2(logBuf, pMsg); @@ -56,7 +56,7 @@ int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg) // } if (pMsg->term > ths->pRaftStore->currentTerm) { - char logBuf[128]; + char logBuf[128] = {0}; snprintf(logBuf, sizeof(logBuf), "syncNodeOnRequestVoteReplyCb error term, receive:%lu current:%lu", pMsg->term, ths->pRaftStore->currentTerm); syncNodePrint2(logBuf, ths); diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c index 42b2bd993b515789934268f4400fece4f040f7c5..ccb0e6071b82e43bd23a9334e294a421a336e57b 100644 --- a/source/libs/sync/src/syncSnapshot.c +++ b/source/libs/sync/src/syncSnapshot.c @@ -15,6 +15,22 @@ #include "syncSnapshot.h" -int32_t takeSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot) { return 0; } +SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode) { return NULL; } -int32_t restoreSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot) { return 0; } \ No newline at end of file +void snapshotSenderDestroy(SSyncSnapshotSender *pSender) {} + +int32_t snapshotSend(SSyncSnapshotSender *pSender) { return 0; } + +cJSON *snapshotSender2Json(SSyncSnapshotSender *pSender) { return NULL; } + +char *snapshotSender2Str(SSyncSnapshotSender *pSender) { return NULL; } + +SSyncSnapshotReceiver *snapshotReceiverCreate(SSyncNode *pSyncNode) { return NULL; } + +void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver) 
{} + +int32_t snapshotReceive(SSyncSnapshotReceiver *pReceiver) { return 0; } + +cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver) { return NULL; } + +char *snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver) { return NULL; } diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c index cf045a692611a64e75c2f4c595180f1e324e75f9..d754acd9f831ac18ce7e28b5ef2fda4b2d8650db 100644 --- a/source/libs/sync/src/syncUtil.c +++ b/source/libs/sync/src/syncUtil.c @@ -43,7 +43,7 @@ void syncUtilnodeInfo2EpSet(const SNodeInfo* pNodeInfo, SEpSet* pEpSet) { } void syncUtilraftId2EpSet(const SRaftId* raftId, SEpSet* pEpSet) { - char host[TSDB_FQDN_LEN]; + char host[TSDB_FQDN_LEN] = {0}; uint16_t port; syncUtilU642Addr(raftId->addr, host, sizeof(host), &port); @@ -62,7 +62,7 @@ void syncUtilraftId2EpSet(const SRaftId* raftId, SEpSet* pEpSet) { void syncUtilnodeInfo2raftId(const SNodeInfo* pNodeInfo, SyncGroupId vgId, SRaftId* raftId) { uint32_t ipv4 = taosGetIpv4FromFqdn(pNodeInfo->nodeFqdn); assert(ipv4 != 0xFFFFFFFF); - char ipbuf[128]; + char ipbuf[128] = {0}; tinet_ntoa(ipbuf, ipv4); raftId->addr = syncUtilAddr2U64(ipbuf, pNodeInfo->nodePort); raftId->vgId = vgId; @@ -106,7 +106,7 @@ int32_t syncUtilElectRandomMS(int32_t min, int32_t max) { int32_t syncUtilQuorum(int32_t replicaNum) { return replicaNum / 2 + 1; } cJSON* syncUtilNodeInfo2Json(const SNodeInfo* p) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); cJSON_AddStringToObject(pRoot, "nodeFqdn", p->nodeFqdn); @@ -118,12 +118,12 @@ cJSON* syncUtilNodeInfo2Json(const SNodeInfo* p) { } cJSON* syncUtilRaftId2Json(const SRaftId* p) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", p->addr); cJSON_AddStringToObject(pRoot, "addr", u64buf); - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(p->addr, host, sizeof(host), &port); cJSON_AddStringToObject(pRoot, "host", host); diff --git a/source/libs/sync/src/syncVoteMgr.c b/source/libs/sync/src/syncVoteMgr.c index 733dfd05b6deb88ed08df78858f358822bebbda7..528c2f26c85c17f33f0a783def69ef9f26798b1b 100644 --- a/source/libs/sync/src/syncVoteMgr.c +++ b/source/libs/sync/src/syncVoteMgr.c @@ -45,6 +45,17 @@ void voteGrantedDestroy(SVotesGranted *pVotesGranted) { } } +void voteGrantedUpdate(SVotesGranted *pVotesGranted, SSyncNode *pSyncNode) { + pVotesGranted->replicas = &(pSyncNode->replicasId); + pVotesGranted->replicaNum = pSyncNode->replicaNum; + voteGrantedClearVotes(pVotesGranted); + + pVotesGranted->term = 0; + pVotesGranted->quorum = pSyncNode->quorum; + pVotesGranted->toLeader = false; + pVotesGranted->pSyncNode = pSyncNode; +} + bool voteGrantedMajority(SVotesGranted *pVotesGranted) { bool ret = pVotesGranted->votes >= pVotesGranted->quorum; return ret; @@ -79,7 +90,7 @@ void voteGrantedReset(SVotesGranted *pVotesGranted, SyncTerm term) { } cJSON *voteGranted2Json(SVotesGranted *pVotesGranted) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON *pRoot = cJSON_CreateObject(); if (pVotesGranted != NULL) { @@ -168,6 +179,13 @@ void votesRespondDestory(SVotesRespond *pVotesRespond) { } } +void votesRespondUpdate(SVotesRespond *pVotesRespond, SSyncNode *pSyncNode) { + pVotesRespond->replicas = &(pSyncNode->replicasId); + pVotesRespond->replicaNum = pSyncNode->replicaNum; + pVotesRespond->term = 0; + pVotesRespond->pSyncNode = pSyncNode; +} + bool votesResponded(SVotesRespond *pVotesRespond, const SRaftId *pRaftId) { bool ret = 
false; for (int i = 0; i < pVotesRespond->replicaNum; ++i) { @@ -202,7 +220,7 @@ void votesRespondReset(SVotesRespond *pVotesRespond, SyncTerm term) { } cJSON *votesRespond2Json(SVotesRespond *pVotesRespond) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON *pRoot = cJSON_CreateObject(); if (pVotesRespond != NULL) { diff --git a/source/libs/sync/test/syncConfigChangeTest.cpp b/source/libs/sync/test/syncConfigChangeTest.cpp index cff692239a756081cf35191cf0787be5bd878326..1ab3ce203ad4a3968bc45ab2382108fa7d97f40c 100644 --- a/source/libs/sync/test/syncConfigChangeTest.cpp +++ b/source/libs/sync/test/syncConfigChangeTest.cpp @@ -42,9 +42,10 @@ void CommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) { } if (cbMeta.index > beginIndex) { - char logBuf[256]; - snprintf(logBuf, sizeof(logBuf), "==callback== ==CommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n", - pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state)); + char logBuf[256] = {0}; + snprintf(logBuf, sizeof(logBuf), + "==callback== ==CommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", pFsm, + cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag); syncRpcMsgLog2(logBuf, (SRpcMsg*)pMsg); } else { sTrace("==callback== ==CommitCb== do not apply again %ld", cbMeta.index); @@ -52,17 +53,18 @@ void CommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) { } void PreCommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) { - char logBuf[256]; + char logBuf[256] = {0}; snprintf(logBuf, sizeof(logBuf), - "==callback== ==PreCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n", pFsm, cbMeta.index, - cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state)); + "==callback== ==PreCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", pFsm, + cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag); syncRpcMsgLog2(logBuf, (SRpcMsg*)pMsg); } void RollBackCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) { char logBuf[256]; - snprintf(logBuf, sizeof(logBuf), "==callback== ==RollBackCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n", - pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state)); + snprintf(logBuf, sizeof(logBuf), + "==callback== ==RollBackCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", pFsm, + cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag); syncRpcMsgLog2(logBuf, (SRpcMsg*)pMsg); } @@ -73,12 +75,27 @@ int32_t GetSnapshotCb(struct SSyncFSM* pFsm, SSnapshot* pSnapshot) { return 0; } +void RestoreFinishCb(struct SSyncFSM* pFsm) { sTrace("==callback== ==RestoreFinishCb=="); } + +void ReConfigCb(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) { + sTrace("==callback== ==ReConfigCb== flag:0x%lX, isDrop:%d, index:%ld, code:%d, currentTerm:%lu, term:%lu", + cbMeta.flag, cbMeta.isDrop, cbMeta.index, cbMeta.code, cbMeta.currentTerm, cbMeta.term); +} + SSyncFSM* createFsm() { SSyncFSM* pFsm = (SSyncFSM*)taosMemoryMalloc(sizeof(SSyncFSM)); + memset(pFsm, 0, sizeof(*pFsm)); + pFsm->FpCommitCb = CommitCb; pFsm->FpPreCommitCb = PreCommitCb; pFsm->FpRollBackCb = RollBackCb; + pFsm->FpGetSnapshot = GetSnapshotCb; + pFsm->FpRestoreFinishCb = RestoreFinishCb; + + + pFsm->FpReConfigCb = ReConfigCb; + return pFsm; 
} @@ -106,6 +123,7 @@ int64_t createSyncNode(int32_t replicaNum, int32_t myIndex, int32_t vgId, SWal* syncInfo.pFsm = createFsm(); snprintf(syncInfo.path, sizeof(syncInfo.path), "%s_sync_replica%d_index%d", path, replicaNum, myIndex); syncInfo.pWal = pWal; + syncInfo.isStandBy = isStandBy; SSyncCfg* pCfg = &syncInfo.syncCfg; @@ -177,7 +195,7 @@ SRpcMsg* createRpcMsg(int i, int count, int myIndex) { int main(int argc, char** argv) { tsAsyncLog = 0; - sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE; + sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE + DEBUG_INFO; if (argc != 7) { usage(argv[0]); exit(-1); @@ -209,17 +227,21 @@ int main(int argc, char** argv) { int64_t rid = createSyncNode(replicaNum, myIndex, gVgId, pWal, (char*)gDir, isStandBy); assert(rid > 0); - if (isStandBy) { - syncStartStandBy(rid); - } else { - syncStart(rid); - } + syncStart(rid); + + /* + if (isStandBy) { + syncStartStandBy(rid); + } else { + syncStart(rid); + } + */ SSyncNode* pSyncNode = (SSyncNode*)syncNodeAcquire(rid); assert(pSyncNode != NULL); if (isConfigChange) { - configChange(rid, 3, myIndex); + configChange(rid, 2, myIndex); } //--------------------------- diff --git a/source/libs/sync/test/syncRaftCfgTest.cpp b/source/libs/sync/test/syncRaftCfgTest.cpp index d3c06fa83e88488eb410c77c68e4ea18aff590fd..f5b24db651f9ed94a290aa2e1ea9611a11f74a04 100644 --- a/source/libs/sync/test/syncRaftCfgTest.cpp +++ b/source/libs/sync/test/syncRaftCfgTest.cpp @@ -15,6 +15,21 @@ void logTest() { sFatal("--- sync log test: fatal"); } +SRaftCfg* createRaftCfg() { + SRaftCfg* pCfg = (SRaftCfg*)taosMemoryMalloc(sizeof(SRaftCfg)); + memset(pCfg, 0, sizeof(SRaftCfg)); + + pCfg->cfg.replicaNum = 3; + pCfg->cfg.myIndex = 1; + for (int i = 0; i < pCfg->cfg.replicaNum; ++i) { + ((pCfg->cfg.nodeInfo)[i]).nodePort = i * 100; + snprintf(((pCfg->cfg.nodeInfo)[i]).nodeFqdn, sizeof(((pCfg->cfg.nodeInfo)[i]).nodeFqdn), "100.200.300.%d", i); + } + pCfg->isStandBy = taosGetTimestampSec() % 100; + + return pCfg; +} + SSyncCfg* createSyncCfg() { SSyncCfg* pCfg = (SSyncCfg*)taosMemoryMalloc(sizeof(SSyncCfg)); memset(pCfg, 0, sizeof(SSyncCfg)); @@ -56,7 +71,7 @@ void test3() { if (taosCheckExistFile(s)) { printf("%s file: %s already exist! 
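The sDebugFlag line above composes DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE + DEBUG_INFO with '+', which is only safe while every constant is a distinct bit. A sketch with hypothetical flag values (TDengine's real DEBUG_* constants live in tlog.h) showing the equivalent, order-safe OR form:

```c
#include <stdio.h>

/* Hypothetical bit values; the real constants are defined in tlog.h. */
enum {
  DBG_INFO   = 1u << 1,
  DBG_TRACE  = 1u << 4,
  DBG_SCREEN = 1u << 6,
  DBG_FILE   = 1u << 7,
};

int main(void) {
  /* '+' happens to work while every constant is a distinct bit; '|' states
   * the intent and stays correct even if a flag appears twice. */
  unsigned flag = DBG_TRACE | DBG_SCREEN | DBG_FILE | DBG_INFO;
  printf("flag=0x%x trace=%d\n", flag, (flag & DBG_TRACE) != 0);
  return 0;
}
```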
\n", (char*)__FUNCTION__, s); } else { - syncCfgCreateFile(pCfg, s); + raftCfgCreateFile(pCfg, 7, s); printf("%s create json file: %s \n", (char*)__FUNCTION__, s); } @@ -78,6 +93,7 @@ void test5() { assert(pCfg != NULL); pCfg->cfg.myIndex = taosGetTimestampSec(); + pCfg->isStandBy += 2; raftCfgPersist(pCfg); printf("%s update json file: %s myIndex->%d \n", (char*)__FUNCTION__, "./test3_raft_cfg.json", pCfg->cfg.myIndex); diff --git a/source/libs/sync/test/syncSnapshotTest.cpp b/source/libs/sync/test/syncSnapshotTest.cpp index 62bda5b22ec8633f1cb6ba2ff2cfbe224ead8c94..820500e2d8f8b57427fec1f20741755a2ddc2d5c 100644 --- a/source/libs/sync/test/syncSnapshotTest.cpp +++ b/source/libs/sync/test/syncSnapshotTest.cpp @@ -75,6 +75,7 @@ int32_t GetSnapshotCb(struct SSyncFSM *pFsm, SSnapshot *pSnapshot) { void initFsm() { pFsm = (SSyncFSM *)taosMemoryMalloc(sizeof(SSyncFSM)); + memset(pFsm, 0, sizeof(*pFsm)); pFsm->FpCommitCb = CommitCb; pFsm->FpPreCommitCb = PreCommitCb; pFsm->FpRollBackCb = RollBackCb; @@ -160,6 +161,8 @@ SyncClientRequest *step1(const SRpcMsg *pMsg) { } int main(int argc, char **argv) { + sprintf(tsTempDir, "%s", "."); + // taosInitLog((char *)"syncTest.log", 100000, 10); tsAsyncLog = 0; sDebugFlag = 143 + 64; diff --git a/source/libs/sync/test/syncTest.cpp b/source/libs/sync/test/syncTest.cpp index 76024e061effc99fe744fac4d7266a1fd94a9207..ffe8b81571beae6ead52398f1a0f1faf7067ebf0 100644 --- a/source/libs/sync/test/syncTest.cpp +++ b/source/libs/sync/test/syncTest.cpp @@ -49,7 +49,7 @@ void test4() { logTest((char*)__FUNCTION__); } -int main() { +int main(int argc, char** argv) { // taosInitLog("tmp/syncTest.log", 100); tsAsyncLog = 0; @@ -58,6 +58,14 @@ int main() { test3(); test4(); + if (argc == 2) { + bool bTaosDirExist = taosDirExist(argv[1]); + printf("%s bTaosDirExist:%d \n", argv[1], bTaosDirExist); + + bool bTaosCheckExistFile = taosCheckExistFile(argv[1]); + printf("%s bTaosCheckExistFile:%d \n", argv[1], bTaosCheckExistFile); + } + // taosCloseLog(); return 0; } diff --git a/source/libs/tdb/src/inc/tdbInt.h b/source/libs/tdb/src/inc/tdbInt.h index 9f0267da93fca6db1b35844e77fdf8877eb33847..6524e3c9bcd873180378b5cfea2404b1a461ac7b 100644 --- a/source/libs/tdb/src/inc/tdbInt.h +++ b/source/libs/tdb/src/inc/tdbInt.h @@ -55,8 +55,8 @@ typedef u32 SPgno; #define TDB_PUT_U24(p, v) \ do { \ int tv = (v); \ - (p)[2] = tv & 0xff; \ - (p)[1] = (tv >> 8) & 0xff; \ + (p)[1] = tv & 0xff; \ + (p)[2] = (tv >> 8) & 0xff; \ (p)[0] = (tv >> 16) & 0xff; \ } while (0) diff --git a/source/libs/tfs/test/tfsTest.cpp b/source/libs/tfs/test/tfsTest.cpp index 58c3a83aff60bf1852534d73ebea8292264a15f1..d53c4a49ba068dd96fc8629efbbab3f0cedc36d9 100644 --- a/source/libs/tfs/test/tfsTest.cpp +++ b/source/libs/tfs/test/tfsTest.cpp @@ -16,7 +16,7 @@ class TfsTest : public ::testing::Test { protected: - static void SetUpTestSuite() { root = "/tmp/tfsTest"; } + static void SetUpTestSuite() { root = TD_TMP_DIR_PATH "tfsTest"; } static void TearDownTestSuite() {} public: @@ -299,15 +299,15 @@ TEST_F(TfsTest, 04_File) { TEST_F(TfsTest, 05_MultiDisk) { int32_t code = 0; - const char *root00 = "/tmp/tfsTest00"; - const char *root01 = "/tmp/tfsTest01"; - const char *root10 = "/tmp/tfsTest10"; - const char *root11 = "/tmp/tfsTest11"; - const char *root12 = "/tmp/tfsTest12"; - const char *root20 = "/tmp/tfsTest20"; - const char *root21 = "/tmp/tfsTest21"; - const char *root22 = "/tmp/tfsTest22"; - const char *root23 = "/tmp/tfsTest23"; + const char *root00 = TD_TMP_DIR_PATH "tfsTest00"; + const char *root01 = 
TD_TMP_DIR_PATH "tfsTest01"; + const char *root10 = TD_TMP_DIR_PATH "tfsTest10"; + const char *root11 = TD_TMP_DIR_PATH "tfsTest11"; + const char *root12 = TD_TMP_DIR_PATH "tfsTest12"; + const char *root20 = TD_TMP_DIR_PATH "tfsTest20"; + const char *root21 = TD_TMP_DIR_PATH "tfsTest21"; + const char *root22 = TD_TMP_DIR_PATH "tfsTest22"; + const char *root23 = TD_TMP_DIR_PATH "tfsTest23"; SDiskCfg dCfg[9] = {0}; tstrncpy(dCfg[0].dir, root01, TSDB_FILENAME_LEN); diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h index 3f82d6e2d85d2a345db7ed7a5bc2f1938a2eda62..a8093f46a25499cb9d073d9a7cb0aad2cdf90c04 100644 --- a/source/libs/transport/inc/transComm.h +++ b/source/libs/transport/inc/transComm.h @@ -20,22 +20,12 @@ extern "C" { #endif #include -#include "lz4.h" #include "os.h" -#include "osSocket.h" #include "taoserror.h" -#include "tglobal.h" -#include "thash.h" #include "theap.h" -#include "tidpool.h" -#include "tmd5.h" -#include "tmempool.h" -#include "tmsg.h" #include "transLog.h" #include "transportInt.h" -#include "tref.h" #include "trpc.h" -#include "ttimer.h" #include "tutil.h" typedef void* queue[2]; @@ -104,30 +94,9 @@ typedef void* queue[2]; /* Return the structure holding the given element. */ #define QUEUE_DATA(e, type, field) ((type*)((void*)((char*)(e)-offsetof(type, field)))) -#define TRANS_RETRY_COUNT_LIMIT 20 // retry count limit -#define TRANS_RETRY_INTERVAL 15 // ms retry interval -#define TRANS_CONN_TIMEOUT 3 // connect timeout - -typedef struct { - SRpcInfo* pRpc; // associated SRpcInfo - SEpSet epSet; // ip list provided by app - void* ahandle; // handle provided by app - // struct SRpcConn* pConn; // pConn allocated - tmsg_t msgType; // message type - uint8_t* pCont; // content provided by app - int32_t contLen; // content length - // int32_t code; // error code - // int16_t numOfTry; // number of try for different servers - // int8_t oldInUse; // server EP inUse passed by app - // int8_t redirect; // flag to indicate redirect - int8_t connType; // connection type - int64_t rid; // refId returned by taosAddRef - SRpcMsg* pRsp; // for synchronous API - tsem_t* pSem; // for synchronous API - char* ip; - uint32_t port; - // SEpSet* pSet; // for synchronous API -} SRpcReqContext; +#define TRANS_RETRY_COUNT_LIMIT 100 // retry count limit +#define TRANS_RETRY_INTERVAL 15 // ms retry interval +#define TRANS_CONN_TIMEOUT 3 // connect timeout typedef SRpcMsg STransMsg; typedef SRpcCtx STransCtx; @@ -135,8 +104,16 @@ typedef SRpcCtxVal STransCtxVal; typedef SRpcInfo STrans; typedef SRpcConnInfo STransHandleInfo; +/*convet from fqdn to ip */ +typedef struct SCvtAddr { + char ip[TSDB_FQDN_LEN]; + char fqdn[TSDB_FQDN_LEN]; + bool cvt; +} SCvtAddr; + typedef struct { - SEpSet epSet; // ip list provided by app + SEpSet epSet; // ip list provided by app + SEpSet origEpSet; void* ahandle; // handle provided by app tmsg_t msgType; // message type int8_t connType; // connection type cli/srv @@ -146,6 +123,7 @@ typedef struct { STransCtx appCtx; // STransMsg* pRsp; // for synchronous API tsem_t* pSem; // for synchronous API + SCvtAddr cvtAddr; int hThrdIdx; } STransConnCtx; @@ -186,7 +164,7 @@ typedef struct { #pragma pack(pop) -typedef enum { Normal, Quit, Release, Register } STransMsgType; +typedef enum { Normal, Quit, Release, Register, Update } STransMsgType; typedef enum { ConnNormal, ConnAcquire, ConnRelease, ConnBroken, ConnInPool } ConnStatus; #define container_of(ptr, type, member) ((type*)((char*)(ptr)-offsetof(type, member))) @@ -240,6 
+218,22 @@ SAsyncPool* transCreateAsyncPool(uv_loop_t* loop, int sz, void* arg, AsyncCB cb) void transDestroyAsyncPool(SAsyncPool* pool); int transSendAsync(SAsyncPool* pool, queue* mq); +#define TRANS_DESTROY_ASYNC_POOL_MSG(pool, msgType, freeFunc) \ + do { \ + for (int i = 0; i < pool->nAsync; i++) { \ + uv_async_t* async = &(pool->asyncs[i]); \ + SAsyncItem* item = async->data; \ + while (!QUEUE_IS_EMPTY(&item->qmsg)) { \ + tTrace("destroy msg in async pool "); \ + queue* h = QUEUE_HEAD(&item->qmsg); \ + QUEUE_REMOVE(h); \ + msgType* msg = QUEUE_DATA(h, msgType, q); \ + if (msg != NULL) { \ + freeFunc(msg); \ + } \ + } \ + } \ + } while (0) int transInitBuffer(SConnBuffer* buf); int transClearBuffer(SConnBuffer* buf); int transDestroyBuffer(SConnBuffer* buf); @@ -262,6 +256,7 @@ void transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pMsg, STransM void transSendResponse(const STransMsg* msg); void transRegisterMsg(const STransMsg* msg); int transGetConnInfo(void* thandle, STransHandleInfo* pInfo); +void transSetDefaultAddr(void* shandle, const char* ip, const char* fqdn); void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle); void* transInitClient(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle); @@ -349,6 +344,8 @@ void transDQDestroy(SDelayQueue* queue); int transDQSched(SDelayQueue* queue, void (*func)(void* arg), void* arg, uint64_t timeoutMs); +void transPrintEpSet(SEpSet* pEpSet); +bool transEpSetIsEqual(SEpSet* a, SEpSet* b); /* * init global func */ diff --git a/source/libs/transport/inc/transportInt.h b/source/libs/transport/inc/transportInt.h index a498571f33cb09cdf93cb04b3fd862dffa13bebd..8aeae1b5ade26a1a320dae37cbfe67f676f66eeb 100644 --- a/source/libs/transport/inc/transportInt.h +++ b/source/libs/transport/inc/transportInt.h @@ -22,15 +22,13 @@ #include "lz4.h" #include "os.h" #include "taoserror.h" -#include "tglobal.h" #include "thash.h" -#include "tidpool.h" +#include "tref.h" #include "tmsg.h" #include "transLog.h" -#include "tref.h" #include "trpc.h" -#include "ttimer.h" #include "tutil.h" +#include "tglobal.h" #ifdef __cplusplus extern "C" { diff --git a/source/libs/transport/src/trans.c b/source/libs/transport/src/trans.c index 5627dbfbf54be3eeed4b4d132b19e2c6b9b1d030..84b0156e3697996a81f7743940b04d73c20d0a05 100644 --- a/source/libs/transport/src/trans.c +++ b/source/libs/transport/src/trans.c @@ -17,7 +17,7 @@ #include "transComm.h" -void* (*taosInitHandle[])(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle) = { +void* (*taosInitHandle[])(uint32_t ip, uint32_t port, char* label, int32_t numOfThreads, void* fp, void* shandle) = { transInitServer, transInitClient}; void (*taosCloseHandle[])(void* arg) = {transCloseServer, transCloseClient}; @@ -27,6 +27,14 @@ void (*taosUnRefHandle[])(void* handle) = {transUnrefSrvHandle, transUnrefCliHan void (*transReleaseHandle[])(void* handle) = {transReleaseSrvHandle, transReleaseCliHandle}; +static int32_t transValidLocalFqdn(const char* localFqdn, uint32_t* ip) { + *ip = taosGetIpv4FromFqdn(localFqdn); + if (*ip == 0xFFFFFFFF) { + terrno = TSDB_CODE_RPC_FQDN_ERROR; + return -1; + } + return 0; +} void* rpcOpen(const SRpcInit* pInit) { SRpcInfo* pRpc = taosMemoryCalloc(1, sizeof(SRpcInfo)); if (pRpc == NULL) { @@ -35,7 +43,6 @@ void* rpcOpen(const SRpcInit* pInit) { if (pInit->label) { tstrncpy(pRpc->label, pInit->label, strlen(pInit->label) + 1); } - // register callback handle pRpc->cfp = 
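TRANS_DESTROY_ASYNC_POOL_MSG above drains whatever is still queued on each uv_async handle before the pool is destroyed, handing every pending message to the caller's free function so nothing leaks on shutdown. The same shape on a plain linked list (the real macro walks the intrusive QUEUE from transComm.h):

```c
#include <stdio.h>
#include <stdlib.h>

typedef struct Msg {
  struct Msg *next;
  int payload;
} Msg;

typedef void (*FreeFn)(Msg *);

static void freeMsg(Msg *m) {
  printf("destroy msg %d still in pool\n", m->payload);
  free(m);
}

/* while (!QUEUE_IS_EMPTY) { head, remove, free } -- same drain loop. */
static void drainPool(Msg **head, FreeFn freeFunc) {
  while (*head != NULL) {
    Msg *m = *head; /* QUEUE_HEAD + QUEUE_DATA */
    *head = m->next; /* QUEUE_REMOVE */
    if (m != NULL) freeFunc(m);
  }
}

int main(void) {
  Msg *head = NULL;
  for (int i = 0; i < 3; i++) {
    Msg *m = calloc(1, sizeof(*m));
    m->payload = i;
    m->next = head;
    head = m;
  }
  drainPool(&head, freeMsg); /* pool teardown: nothing left queued */
  return 0;
}
```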
pInit->cfp; pRpc->retry = pInit->rfp; @@ -48,10 +55,8 @@ void* rpcOpen(const SRpcInit* pInit) { uint32_t ip = 0; if (pInit->connType == TAOS_CONN_SERVER) { - ip = taosGetIpv4FromFqdn(pInit->localFqdn); - if (ip == 0xFFFFFFFF) { - tError("invalid fqdn: %s", pInit->localFqdn); - terrno = TSDB_CODE_RPC_FQDN_ERROR; + if (transValidLocalFqdn(pInit->localFqdn, &ip) != 0) { + tError("invalid fqdn: %s, errmsg: %s", pInit->localFqdn, terrstr()); taosMemoryFree(pRpc); return NULL; } @@ -77,37 +82,38 @@ void rpcClose(void* arg) { taosMemoryFree(pRpc); return; } -void* rpcMallocCont(int contLen) { - int size = contLen + TRANS_MSG_OVERHEAD; - char* start = (char*)taosMemoryCalloc(1, (size_t)size); +void* rpcMallocCont(int32_t contLen) { + int32_t size = contLen + TRANS_MSG_OVERHEAD; + char* start = taosMemoryCalloc(1, size); if (start == NULL) { tError("failed to malloc msg, size:%d", size); + terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } else { tTrace("malloc mem:%p size:%d", start, size); } + return start + sizeof(STransMsgHead); } -void rpcFreeCont(void* cont) { - // impl - if (cont == NULL) { - return; - } +void rpcFreeCont(void* cont) { + if (cont == NULL) return; taosMemoryFree((char*)cont - TRANS_MSG_OVERHEAD); tTrace("free mem: %p", (char*)cont - TRANS_MSG_OVERHEAD); } -void* rpcReallocCont(void* ptr, int contLen) { - if (ptr == NULL) { - return rpcMallocCont(contLen); - } - char* st = (char*)ptr - TRANS_MSG_OVERHEAD; - int sz = contLen + TRANS_MSG_OVERHEAD; + +void* rpcReallocCont(void* ptr, int32_t contLen) { + if (ptr == NULL) return rpcMallocCont(contLen); + + char* st = (char*)ptr - TRANS_MSG_OVERHEAD; + int32_t sz = contLen + TRANS_MSG_OVERHEAD; st = taosMemoryRealloc(st, sz); if (st == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } + return st + TRANS_MSG_OVERHEAD; } @@ -116,8 +122,8 @@ void rpcSendRedirectRsp(void* thandle, const SEpSet* pEpSet) { assert(0); } -int rpcReportProgress(void* pConn, char* pCont, int contLen) { return -1; } -void rpcCancelRequest(int64_t rid) { return; } +int32_t rpcReportProgress(void* pConn, char* pCont, int32_t contLen) { return -1; } +void rpcCancelRequest(int64_t rid) { return; } void rpcSendRequest(void* shandle, const SEpSet* pEpSet, SRpcMsg* pMsg, int64_t* pRid) { transSendRequest(shandle, pEpSet, pMsg, NULL); @@ -129,8 +135,8 @@ void rpcSendRecv(void* shandle, SEpSet* pEpSet, SRpcMsg* pMsg, SRpcMsg* pRsp) { transSendRecv(shandle, pEpSet, pMsg, pRsp); } -void rpcSendResponse(const SRpcMsg* pMsg) { transSendResponse(pMsg); } -int rpcGetConnInfo(void* thandle, SRpcConnInfo* pInfo) { return transGetConnInfo((void*)thandle, pInfo); } +void rpcSendResponse(const SRpcMsg* pMsg) { transSendResponse(pMsg); } +int32_t rpcGetConnInfo(void* thandle, SRpcConnInfo* pInfo) { return transGetConnInfo((void*)thandle, pInfo); } void rpcRefHandle(void* handle, int8_t type) { assert(type == TAOS_CONN_SERVER || type == TAOS_CONN_CLIENT); @@ -148,6 +154,11 @@ void rpcReleaseHandle(void* handle, int8_t type) { (*transReleaseHandle[type])(handle); } +void rpcSetDefaultAddr(void* thandle, const char* ip, const char* fqdn) { + // later + transSetDefaultAddr(thandle, ip, fqdn); +} + int32_t rpcInit() { // impl later return 0; diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 92c5e9faf70f95741c52803be1680b97d33f21fa..a8e79266ac7865cc4cd019b6aab1b1bb906d0abf 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -63,7 +63,10 @@ typedef struct SCliThrdObj { SDelayQueue* 
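rpcMallocCont above hands the application a pointer just past the transport header, while rpcFreeCont steps back by TRANS_MSG_OVERHEAD before freeing; the pairing only holds if that overhead equals sizeof(STransMsgHead), which the two functions together imply. A self-contained sketch of the header-prefixed container pattern with a stand-in header:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  uint32_t msgLen;
  uint8_t  version;
} MsgHead; /* stand-in for STransMsgHead */

#define MSG_OVERHEAD sizeof(MsgHead)

static void *mallocCont(size_t contLen) {
  /* one allocation: [header][content]; caller sees only the content */
  char *start = calloc(1, contLen + MSG_OVERHEAD);
  return start ? start + MSG_OVERHEAD : NULL;
}

static void freeCont(void *cont) {
  if (cont == NULL) return;
  free((char *)cont - MSG_OVERHEAD); /* step back to the true allocation */
}

int main(void) {
  char *cont = mallocCont(16);
  if (cont == NULL) return 1;
  strcpy(cont, "hello");
  MsgHead *head = (MsgHead *)(cont - MSG_OVERHEAD); /* transport's view */
  head->msgLen = 16;
  printf("cont=%s len=%u\n", cont, (unsigned)head->msgLen);
  freeCont(cont);
  return 0;
}
```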
delayQueue; uint64_t nextTimeout; // next timeout void* pTransInst; // - bool quit; + + SCvtAddr cvtAddr; + + bool quit; } SCliThrdObj; typedef struct SCliObj { @@ -103,6 +106,7 @@ static void cliDestroyConn(SCliConn* pConn, bool clear /*clear tcp handle o static void cliDestroy(uv_handle_t* handle); static void cliSend(SCliConn* pConn); +void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr); /* * set TCP connection timeout per-socket level */ @@ -116,7 +120,9 @@ static void cliHandleExcept(SCliConn* conn); static void cliHandleReq(SCliMsg* pMsg, SCliThrdObj* pThrd); static void cliHandleQuit(SCliMsg* pMsg, SCliThrdObj* pThrd); static void cliHandleRelease(SCliMsg* pMsg, SCliThrdObj* pThrd); -static void (*cliAsyncHandle[])(SCliMsg* pMsg, SCliThrdObj* pThrd) = {cliHandleReq, cliHandleQuit, cliHandleRelease}; +static void cliHandleUpdate(SCliMsg* pMsg, SCliThrdObj* pThrd); +static void (*cliAsyncHandle[])(SCliMsg* pMsg, SCliThrdObj* pThrd) = {cliHandleReq, cliHandleQuit, cliHandleRelease, + NULL, cliHandleUpdate}; static void cliSendQuit(SCliThrdObj* thrd); static void destroyUserdata(STransMsg* userdata); @@ -131,6 +137,19 @@ static void destroyThrdObj(SCliThrdObj* pThrd); static void cliWalkCb(uv_handle_t* handle, void* arg); +static void cliReleaseUnfinishedMsg(SCliConn* conn) { + SCliMsg* pMsg = NULL; + for (int i = 0; i < transQueueSize(&conn->cliMsgs); i++) { + pMsg = transQueueGet(&conn->cliMsgs, i); + if (pMsg != NULL && pMsg->ctx != NULL) { + if (conn->ctx.freeFunc != NULL) { + conn->ctx.freeFunc(pMsg->ctx->ahandle); + } + } + destroyCmsg(pMsg); + } +} + #define CLI_RELEASE_UV(loop) \ do { \ uv_walk(loop, cliWalkCb, NULL); \ @@ -161,6 +180,7 @@ static void cliWalkCb(uv_handle_t* handle, void* arg); transUnrefCliHandle(conn); \ } \ destroyCmsg(pMsg); \ + cliReleaseUnfinishedMsg(conn); \ addConnToPool(((SCliThrdObj*)conn->hostThrd)->pool, conn); \ return; \ } \ @@ -465,8 +485,8 @@ static void addConnToPool(void* pool, SCliConn* conn) { STrans* pTransInst = ((SCliThrdObj*)conn->hostThrd)->pTransInst; conn->expireTime = taosGetTimestampMs() + CONN_PERSIST_TIME(pTransInst->idleTime); - transCtxCleanup(&conn->ctx); transQueueClear(&conn->cliMsgs); + transCtxCleanup(&conn->ctx); conn->status = ConnInPool; char key[128] = {0}; @@ -683,6 +703,12 @@ static void cliHandleRelease(SCliMsg* pMsg, SCliThrdObj* pThrd) { transUnrefCliHandle(conn); } } +static void cliHandleUpdate(SCliMsg* pMsg, SCliThrdObj* pThrd) { + STransConnCtx* pCtx = pMsg->ctx; + + pThrd->cvtAddr = pCtx->cvtAddr; + destroyCmsg(pMsg); +} SCliConn* cliGetConn(SCliMsg* pMsg, SCliThrdObj* pThrd) { SCliConn* conn = NULL; @@ -702,7 +728,17 @@ SCliConn* cliGetConn(SCliMsg* pMsg, SCliThrdObj* pThrd) { } return conn; } - +void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr) { + if (pCvtAddr->cvt == false) { + return; + } + for (int i = 0; i < pEpSet->numOfEps && pEpSet->numOfEps == 1; i++) { + if (strncmp(pEpSet->eps[i].fqdn, pCvtAddr->fqdn, TSDB_FQDN_LEN) == 0) { + memset(pEpSet->eps[i].fqdn, 0, TSDB_FQDN_LEN); + memcpy(pEpSet->eps[i].fqdn, pCvtAddr->ip, TSDB_FQDN_LEN); + } + } +} void cliHandleReq(SCliMsg* pMsg, SCliThrdObj* pThrd) { uint64_t et = taosGetTimestampUs(); uint64_t el = et - pMsg->st; @@ -712,6 +748,8 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrdObj* pThrd) { STransConnCtx* pCtx = pMsg->ctx; STrans* pTransInst = pThrd->pTransInst; + cliMayCvtFqdnToIp(&pCtx->epSet, &pThrd->cvtAddr); + SCliConn* conn = cliGetConn(pMsg, pThrd); if (conn != NULL) { conn->hThrdIdx = pCtx->hThrdIdx; @@ -841,7 +879,6 @@ static 
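cliMayCvtFqdnToIp above only rewrites an endpoint when the epset holds exactly one entry; that is what the `numOfEps == 1` term folded into the loop condition enforces. A compilable mirror with stand-in types:

```c
#include <stdio.h>
#include <string.h>

#define FQDN_LEN 128 /* stand-in for TSDB_FQDN_LEN */

typedef struct { char fqdn[FQDN_LEN]; int port; } Ep;
typedef struct { int numOfEps; Ep eps[3]; } EpSet;
typedef struct { char ip[FQDN_LEN]; char fqdn[FQDN_LEN]; int cvt; } CvtAddr;

static void mayCvtFqdnToIp(EpSet *pEpSet, const CvtAddr *pCvtAddr) {
  if (!pCvtAddr->cvt) return;
  /* the numOfEps == 1 guard restricts the rewrite to single-endpoint sets */
  for (int i = 0; i < pEpSet->numOfEps && pEpSet->numOfEps == 1; i++) {
    if (strncmp(pEpSet->eps[i].fqdn, pCvtAddr->fqdn, FQDN_LEN) == 0) {
      memset(pEpSet->eps[i].fqdn, 0, FQDN_LEN);
      memcpy(pEpSet->eps[i].fqdn, pCvtAddr->ip, FQDN_LEN);
    }
  }
}

int main(void) {
  EpSet s = {.numOfEps = 1};
  snprintf(s.eps[0].fqdn, FQDN_LEN, "localhost");
  s.eps[0].port = 6030;

  CvtAddr c = {.cvt = 1};
  snprintf(c.fqdn, FQDN_LEN, "localhost");
  snprintf(c.ip, FQDN_LEN, "127.0.0.1");

  mayCvtFqdnToIp(&s, &c);
  printf("ep: %s:%d\n", s.eps[0].fqdn, s.eps[0].port); /* 127.0.0.1:6030 */
  return 0;
}
```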
SCliThrdObj* createThrdObj() { pThrd->timer.data = pThrd; pThrd->pool = createConnPool(4); - transDQCreate(pThrd->loop, &pThrd->delayQueue); pThrd->quit = false; @@ -855,6 +892,7 @@ static void destroyThrdObj(SCliThrdObj* pThrd) { taosThreadJoin(pThrd->thread, NULL); CLI_RELEASE_UV(pThrd->loop); taosThreadMutexDestroy(&pThrd->msgMtx); + TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SCliMsg, destroyCmsg); transDestroyAsyncPool(pThrd->asyncPool); transDQDestroy(pThrd->delayQueue); @@ -907,16 +945,22 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) { STransConnCtx* pCtx = pMsg->ctx; SEpSet* pEpSet = &pCtx->epSet; + transPrintEpSet(pEpSet); + + if (pCtx->retryCount == 0) { + pCtx->origEpSet = pCtx->epSet; + } /* * upper layer handle retry if code equal TSDB_CODE_RPC_NETWORK_UNAVAIL */ tmsg_t msgType = pCtx->msgType; if ((pTransInst->retry != NULL && (pTransInst->retry(pResp->code))) || - ((pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) && msgType == TDMT_MND_CONNECT)) { + (pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || pResp->code == TSDB_CODE_APP_NOT_READY || + pResp->code == TSDB_CODE_NODE_NOT_DEPLOYED || pResp->code == TSDB_CODE_SYN_NOT_LEADER)) { pMsg->sent = 0; pMsg->st = taosGetTimestampUs(); pCtx->retryCount += 1; - if (msgType == TDMT_MND_CONNECT && pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { + if (pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { if (pCtx->retryCount < pEpSet->numOfEps) { pEpSet->inUse = (++pEpSet->inUse) % pEpSet->numOfEps; @@ -931,9 +975,9 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) { if (pResp->contLen == 0) { pEpSet->inUse = (++pEpSet->inUse) % pEpSet->numOfEps; } else { - SMEpSet emsg = {0}; - tDeserializeSMEpSet(pResp->pCont, pResp->contLen, &emsg); - pCtx->epSet = emsg.epSet; + SEpSet epSet = {0}; + tDeserializeSEpSet(pResp->pCont, pResp->contLen, &epSet); + pCtx->epSet = epSet; } addConnToPool(pThrd->pool, pConn); tTrace("use remote epset, current in use: %d, retry count:%d, try limit: %d", pEpSet->inUse, pCtx->retryCount + 1, @@ -958,7 +1002,11 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) { pCtx->pRsp = NULL; } else { tTrace("%s cli conn %p handle resp", pTransInst->label, pConn); - pTransInst->cfp(pTransInst->parent, pResp, pEpSet); + if (pResp->code != 0 || pCtx->retryCount == 0 || transEpSetIsEqual(&pCtx->epSet, &pCtx->origEpSet)) { + pTransInst->cfp(pTransInst->parent, pResp, NULL); + } else { + pTransInst->cfp(pTransInst->parent, pResp, pEpSet); + } } return 0; } @@ -1067,4 +1115,32 @@ void transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransM taosMemoryFree(pSem); } +/* + * + **/ +void transSetDefaultAddr(void* ahandle, const char* ip, const char* fqdn) { + STrans* pTransInst = ahandle; + + SCvtAddr cvtAddr = {0}; + if (ip != NULL && fqdn != NULL) { + memcpy(cvtAddr.ip, ip, strlen(ip)); + memcpy(cvtAddr.fqdn, fqdn, strlen(fqdn)); + cvtAddr.cvt = true; + } + for (int i = 0; i < pTransInst->numOfThreads; i++) { + STransConnCtx* pCtx = taosMemoryCalloc(1, sizeof(STransConnCtx)); + pCtx->hThrdIdx = i; + pCtx->cvtAddr = cvtAddr; + + SCliMsg* cliMsg = taosMemoryCalloc(1, sizeof(SCliMsg)); + cliMsg->ctx = pCtx; + cliMsg->type = Update; + + SCliThrdObj* thrd = ((SCliObj*)pTransInst->tcphandle)->pThreadObj[i]; + tDebug("update epset at thread:%d, threadID:%" PRId64 "", i, thrd->thread); + + tsem_t* pSem = pCtx->pSem; + transSendAsync(thrd->asyncPool, &(cliMsg->q)); + } +} #endif diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c index 
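transSetDefaultAddr above fans the new SCvtAddr out as one Update message per client thread, so each worker applies the mapping on its own event loop with no shared-state locking. A simplified sketch of that fan-out, with enqueue standing in for transSendAsync and all types hypothetical:

```c
#include <stdio.h>

#define NUM_THREADS 4

typedef struct { int thrdIdx; const char *fqdn; const char *ip; } UpdateMsg;

/* stand-in for transSendAsync(thrd->asyncPool, &cliMsg->q) */
static void enqueue(int thrd, const UpdateMsg *msg) {
  printf("thread %d: map %s -> %s\n", thrd, msg->fqdn, msg->ip);
}

int main(void) {
  UpdateMsg msgs[NUM_THREADS];
  for (int i = 0; i < NUM_THREADS; i++) {
    /* one message per thread; each worker mutates only its own state */
    msgs[i] = (UpdateMsg){.thrdIdx = i, .fqdn = "localhost", .ip = "127.0.0.1"};
    enqueue(i, &msgs[i]);
  }
  return 0;
}
```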
7014cc481f6f3908793ea2f6fc074a04fbe7472b..333ec44fe40246254ef03e6646e4e7e7a932d93a 100644 --- a/source/libs/transport/src/transComm.c +++ b/source/libs/transport/src/transComm.c @@ -190,6 +190,7 @@ SAsyncPool* transCreateAsyncPool(uv_loop_t* loop, int sz, void* arg, AsyncCB cb) } return pool; } + void transDestroyAsyncPool(SAsyncPool* pool) { for (int i = 0; i < pool->nAsync; i++) { uv_async_t* async = &(pool->asyncs[i]); @@ -233,7 +234,7 @@ void transCtxCleanup(STransCtx* ctx) { STransCtxVal* iter = taosHashIterate(ctx->args, NULL); while (iter) { - iter->freeFunc(iter->val); + ctx->freeFunc(iter->val); iter = taosHashIterate(ctx->args, iter); } @@ -245,6 +246,7 @@ void transCtxMerge(STransCtx* dst, STransCtx* src) { if (dst->args == NULL) { dst->args = src->args; dst->brokenVal = src->brokenVal; + dst->freeFunc = src->freeFunc; src->args = NULL; return; } @@ -257,7 +259,7 @@ void transCtxMerge(STransCtx* dst, STransCtx* src) { STransCtxVal* dVal = taosHashGet(dst->args, key, klen); if (dVal) { - dVal->freeFunc(dVal->val); + dst->freeFunc(dVal->val); } taosHashPut(dst->args, key, klen, sVal, sizeof(*sVal)); iter = taosHashIterate(src->args, iter); @@ -445,4 +447,27 @@ int transDQSched(SDelayQueue* queue, void (*func)(void* arg), void* arg, uint64_ uv_timer_start(queue->timer, transDQTimeout, timeoutMs, 0); return 0; } + +void transPrintEpSet(SEpSet* pEpSet) { + if (pEpSet == NULL) { + tTrace("NULL epset"); + return; + } + tTrace("epset begin inUse: %d", pEpSet->inUse); + for (int i = 0; i < pEpSet->numOfEps; i++) { + tTrace("ip: %s, port: %d", pEpSet->eps[i].fqdn, pEpSet->eps[i].port); + } + tTrace("epset end"); +} +bool transEpSetIsEqual(SEpSet* a, SEpSet* b) { + if (a->numOfEps != b->numOfEps || a->inUse != b->inUse) { + return false; + } + for (int i = 0; i < a->numOfEps; i++) { + if (strncmp(a->eps[i].fqdn, b->eps[i].fqdn, TSDB_FQDN_LEN) != 0 || a->eps[i].port != b->eps[i].port) { + return false; + } + } + return true; +} #endif diff --git a/source/libs/transport/src/transSrv.c b/source/libs/transport/src/transSvr.c similarity index 91% rename from source/libs/transport/src/transSrv.c rename to source/libs/transport/src/transSvr.c index da83a6f37fc5b03cc880165d25689c918963ec7f..52b36433bb45ace6b0fa4224fb80b65e0e5e2627 100644 --- a/source/libs/transport/src/transSrv.c +++ b/source/libs/transport/src/transSvr.c @@ -20,15 +20,15 @@ static TdThreadOnce transModuleInit = PTHREAD_ONCE_INIT; static char* notify = "a"; -static int transSrvInst = 0; +static int tranSSvrInst = 0; typedef struct { int notifyCount; // int init; // init or not STransMsg msg; -} SSrvRegArg; +} SSvrRegArg; -typedef struct SSrvConn { +typedef struct SSvrConn { T_REF_DECLARE() uv_tcp_t* pTcp; uv_write_t pWriter; @@ -42,7 +42,7 @@ typedef struct SSrvConn { void* hostThrd; STransQueue srvMsgs; - SSrvRegArg regArg; + SSvrRegArg regArg; bool broken; // conn broken; ConnStatus status; @@ -55,14 +55,14 @@ typedef struct SSrvConn { char user[TSDB_UNI_LEN]; // user ID for the link char secret[TSDB_PASSWORD_LEN]; char ckey[TSDB_PASSWORD_LEN]; // ciphering key -} SSrvConn; +} SSvrConn; -typedef struct SSrvMsg { - SSrvConn* pConn; +typedef struct SSvrMsg { + SSvrConn* pConn; STransMsg msg; queue q; STransMsgType type; -} SSrvMsg; +} SSvrMsg; typedef struct SWorkThrdObj { TdThread thread; @@ -127,26 +127,26 @@ static void uvWorkAfterTask(uv_work_t* req, int status); static void uvWalkCb(uv_handle_t* handle, void* arg); static void uvFreeCb(uv_handle_t* handle); -static void uvStartSendRespInternal(SSrvMsg* smsg); -static void 
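transEpSetIsEqual above is what lets cliAppCb report the epset back to the application only when a retry actually moved it away from origEpSet. The same element-wise comparison, plus that use, in stand-alone form with simplified types:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FQDN_LEN 128 /* stand-in for TSDB_FQDN_LEN */

typedef struct { char fqdn[FQDN_LEN]; uint16_t port; } Ep;
typedef struct { int inUse; int numOfEps; Ep eps[3]; } EpSet;

/* Same checks the diff adds: count, inUse, then fqdn and port per entry. */
static bool epSetIsEqual(const EpSet *a, const EpSet *b) {
  if (a->numOfEps != b->numOfEps || a->inUse != b->inUse) return false;
  for (int i = 0; i < a->numOfEps; i++) {
    if (strncmp(a->eps[i].fqdn, b->eps[i].fqdn, FQDN_LEN) != 0 ||
        a->eps[i].port != b->eps[i].port) {
      return false;
    }
  }
  return true;
}

int main(void) {
  EpSet orig = {.inUse = 0, .numOfEps = 1, .eps = {{"node1", 6030}}};
  EpSet cur  = orig;
  cur.eps[0].port = 6031; /* server redirected us mid-retry */
  /* cliAppCb hands the epset to the app only when it changed */
  printf("report epset to app: %s\n", epSetIsEqual(&orig, &cur) ? "no" : "yes");
  return 0;
}
```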
uvPrepareSendData(SSrvMsg* msg, uv_buf_t* wb); -static void uvStartSendResp(SSrvMsg* msg); +static void uvStartSendRespInternal(SSvrMsg* smsg); +static void uvPrepareSendData(SSvrMsg* msg, uv_buf_t* wb); +static void uvStartSendResp(SSvrMsg* msg); -static void uvNotifyLinkBrokenToApp(SSrvConn* conn); +static void uvNotifyLinkBrokenToApp(SSvrConn* conn); -static void destroySmsg(SSrvMsg* smsg); +static void destroySmsg(SSvrMsg* smsg); // check whether already read complete packet -static SSrvConn* createConn(void* hThrd); -static void destroyConn(SSrvConn* conn, bool clear /*clear handle or not*/); -static void destroyConnRegArg(SSrvConn* conn); +static SSvrConn* createConn(void* hThrd); +static void destroyConn(SSvrConn* conn, bool clear /*clear handle or not*/); +static void destroyConnRegArg(SSvrConn* conn); -static int reallocConnRefHandle(SSrvConn* conn); +static int reallocConnRefHandle(SSvrConn* conn); -static void uvHandleQuit(SSrvMsg* msg, SWorkThrdObj* thrd); -static void uvHandleRelease(SSrvMsg* msg, SWorkThrdObj* thrd); -static void uvHandleResp(SSrvMsg* msg, SWorkThrdObj* thrd); -static void uvHandleRegister(SSrvMsg* msg, SWorkThrdObj* thrd); -static void (*transAsyncHandle[])(SSrvMsg* msg, SWorkThrdObj* thrd) = {uvHandleResp, uvHandleQuit, uvHandleRelease, - uvHandleRegister}; +static void uvHandleQuit(SSvrMsg* msg, SWorkThrdObj* thrd); +static void uvHandleRelease(SSvrMsg* msg, SWorkThrdObj* thrd); +static void uvHandleResp(SSvrMsg* msg, SWorkThrdObj* thrd); +static void uvHandleRegister(SSvrMsg* msg, SWorkThrdObj* thrd); +static void (*transAsyncHandle[])(SSvrMsg* msg, SWorkThrdObj* thrd) = {uvHandleResp, uvHandleQuit, uvHandleRelease, + uvHandleRegister, NULL}; static int32_t exHandlesMgt; @@ -178,7 +178,7 @@ static bool addHandleToAcceptloop(void* arg); tTrace("server conn %p received release request", conn); \ \ STransMsg tmsg = {.code = 0, .info.handle = (void*)conn, .info.ahandle = NULL}; \ - SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg)); \ + SSvrMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSvrMsg)); \ srvMsg->msg = tmsg; \ srvMsg->type = Release; \ srvMsg->pConn = conn; \ @@ -233,18 +233,18 @@ static bool addHandleToAcceptloop(void* arg); } while (0) void uvAllocRecvBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) { - SSrvConn* conn = handle->data; + SSvrConn* conn = handle->data; SConnBuffer* pBuf = &conn->readBuf; transAllocBuffer(pBuf, buf); } // refers specifically to query or insert timeout static void uvHandleActivityTimeout(uv_timer_t* handle) { - SSrvConn* conn = handle->data; + SSvrConn* conn = handle->data; tDebug("%p timeout since no activity", conn); } -static void uvHandleReq(SSrvConn* pConn) { +static void uvHandleReq(SSvrConn* pConn) { SConnBuffer* pBuf = &pConn->readBuf; char* msg = pBuf->buf; uint32_t msgLen = pBuf->len; @@ -295,14 +295,14 @@ static void uvHandleReq(SSrvConn* pConn) { // no ref here } - // if pHead->noResp = 1, + // pHead->noResp = 1, // 1. server application should not send resp on handle // 2. once send out data, cli conn released to conn pool immediately // 3. 
not mixed with persist transMsg.info.handle = (void*)uvAcquireExHandle(pConn->refId); - tTrace("server handle %p conn: %p translated to app, refId: %" PRIu64 "", transMsg.info.handle, pConn, pConn->refId); transMsg.info.refId = pConn->refId; + tTrace("server handle %p conn: %p translated to app, refId: %" PRIu64 "", transMsg.info.handle, pConn, pConn->refId); assert(transMsg.info.handle != NULL); if (pHead->noResp == 1) { transMsg.info.refId = -1; @@ -316,7 +316,7 @@ static void uvHandleReq(SSrvConn* pConn) { void uvOnRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) { // opt - SSrvConn* conn = cli->data; + SSvrConn* conn = cli->data; SConnBuffer* pBuf = &conn->readBuf; if (nread > 0) { pBuf->len += nread; @@ -354,17 +354,17 @@ void uvAllocConnBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* b void uvOnTimeoutCb(uv_timer_t* handle) { // opt - SSrvConn* pConn = handle->data; + SSvrConn* pConn = handle->data; tError("server conn %p time out", pConn); } void uvOnSendCb(uv_write_t* req, int status) { - SSrvConn* conn = req->data; + SSvrConn* conn = req->data; // transClearBuffer(&conn->readBuf); if (status == 0) { tTrace("server conn %p data already was written on stream", conn); if (!transQueueEmpty(&conn->srvMsgs)) { - SSrvMsg* msg = transQueuePop(&conn->srvMsgs); + SSvrMsg* msg = transQueuePop(&conn->srvMsgs); // if (msg->type == Release && conn->status != ConnNormal) { // conn->status = ConnNormal; // transUnrefSrvHandle(conn); @@ -376,7 +376,7 @@ void uvOnSendCb(uv_write_t* req, int status) { destroySmsg(msg); // send second data, just use for push if (!transQueueEmpty(&conn->srvMsgs)) { - msg = (SSrvMsg*)transQueueGet(&conn->srvMsgs, 0); + msg = (SSvrMsg*)transQueueGet(&conn->srvMsgs, 0); if (msg->type == Register && conn->status == ConnAcquire) { conn->regArg.notifyCount = 0; conn->regArg.init = 1; @@ -389,7 +389,7 @@ void uvOnSendCb(uv_write_t* req, int status) { transQueuePop(&conn->srvMsgs); taosMemoryFree(msg); - msg = (SSrvMsg*)transQueueGet(&conn->srvMsgs, 0); + msg = (SSvrMsg*)transQueueGet(&conn->srvMsgs, 0); if (msg != NULL) { uvStartSendRespInternal(msg); } @@ -415,10 +415,10 @@ static void uvOnPipeWriteCb(uv_write_t* req, int status) { taosMemoryFree(req); } -static void uvPrepareSendData(SSrvMsg* smsg, uv_buf_t* wb) { +static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) { tTrace("server conn %p prepare to send resp", smsg->pConn); - SSrvConn* pConn = smsg->pConn; + SSvrConn* pConn = smsg->pConn; STransMsg* pMsg = &smsg->msg; if (pMsg->pCont == 0) { pMsg->pCont = (void*)rpcMallocCont(0); @@ -455,17 +455,17 @@ static void uvPrepareSendData(SSrvMsg* smsg, uv_buf_t* wb) { wb->len = len; } -static void uvStartSendRespInternal(SSrvMsg* smsg) { +static void uvStartSendRespInternal(SSvrMsg* smsg) { uv_buf_t wb; uvPrepareSendData(smsg, &wb); - SSrvConn* pConn = smsg->pConn; + SSvrConn* pConn = smsg->pConn; // uv_timer_stop(&pConn->pTimer); uv_write(&pConn->pWriter, (uv_stream_t*)pConn->pTcp, &wb, 1, uvOnSendCb); } -static void uvStartSendResp(SSrvMsg* smsg) { +static void uvStartSendResp(SSvrMsg* smsg) { // impl - SSrvConn* pConn = smsg->pConn; + SSvrConn* pConn = smsg->pConn; if (pConn->broken == true) { // persist by @@ -485,7 +485,7 @@ static void uvStartSendResp(SSrvMsg* smsg) { return; } -static void destroySmsg(SSrvMsg* smsg) { +static void destroySmsg(SSvrMsg* smsg) { if (smsg == NULL) { return; } @@ -499,7 +499,7 @@ static void destroyAllConn(SWorkThrdObj* pThrd) { QUEUE_REMOVE(h); QUEUE_INIT(h); - SSrvConn* c = QUEUE_DATA(h, SSrvConn, queue); 
+ SSvrConn* c = QUEUE_DATA(h, SSvrConn, queue); while (T_REF_VAL_GET(c) >= 2) { transUnrefSrvHandle(c); } @@ -509,7 +509,7 @@ static void destroyAllConn(SWorkThrdObj* pThrd) { void uvWorkerAsyncCb(uv_async_t* handle) { SAsyncItem* item = handle->data; SWorkThrdObj* pThrd = item->pThrd; - SSrvConn* conn = NULL; + SSvrConn* conn = NULL; queue wq; // batch process to avoid to lock/unlock frequently @@ -521,7 +521,7 @@ void uvWorkerAsyncCb(uv_async_t* handle) { queue* head = QUEUE_HEAD(&wq); QUEUE_REMOVE(head); - SSrvMsg* msg = QUEUE_DATA(head, SSrvMsg, q); + SSvrMsg* msg = QUEUE_DATA(head, SSvrMsg, q); if (msg == NULL) { tError("unexcept occurred, continue"); continue; @@ -649,7 +649,7 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) { uv_handle_type pending = uv_pipe_pending_type(pipe); assert(pending == UV_TCP); - SSrvConn* pConn = createConn(pThrd); + SSvrConn* pConn = createConn(pThrd); pConn->pTransInst = pThrd->pTransInst; /* init conn timer*/ @@ -768,10 +768,10 @@ void* transWorkerThread(void* arg) { return NULL; } -static SSrvConn* createConn(void* hThrd) { +static SSvrConn* createConn(void* hThrd) { SWorkThrdObj* pThrd = hThrd; - SSrvConn* pConn = (SSrvConn*)taosMemoryCalloc(1, sizeof(SSrvConn)); + SSvrConn* pConn = (SSvrConn*)taosMemoryCalloc(1, sizeof(SSvrConn)); QUEUE_INIT(&pConn->queue); QUEUE_PUSH(&pThrd->conn, &pConn->queue); @@ -794,7 +794,7 @@ static SSrvConn* createConn(void* hThrd) { return pConn; } -static void destroyConn(SSrvConn* conn, bool clear) { +static void destroyConn(SSvrConn* conn, bool clear) { if (conn == NULL) { return; } @@ -808,13 +808,13 @@ static void destroyConn(SSrvConn* conn, bool clear) { // uv_shutdown(req, (uv_stream_t*)conn->pTcp, uvShutDownCb); } } -static void destroyConnRegArg(SSrvConn* conn) { +static void destroyConnRegArg(SSvrConn* conn) { if (conn->regArg.init == 1) { transFreeMsg(conn->regArg.msg.pCont); conn->regArg.init = 0; } } -static int reallocConnRefHandle(SSrvConn* conn) { +static int reallocConnRefHandle(SSvrConn* conn) { uvReleaseExHandle(conn->refId); uvRemoveExHandle(conn->refId); // avoid app continue to send msg on invalid handle @@ -828,7 +828,7 @@ static int reallocConnRefHandle(SSrvConn* conn) { return 0; } static void uvDestroyConn(uv_handle_t* handle) { - SSrvConn* conn = handle->data; + SSvrConn* conn = handle->data; if (conn == NULL) { return; } @@ -884,7 +884,7 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, uv_loop_init(srv->loop); taosThreadOnce(&transModuleInit, uvInitEnv); - transSrvInst++; + tranSSvrInst++; assert(0 == uv_pipe_init(srv->loop, &srv->pipeListen, 0)); #ifdef WINDOWS @@ -923,7 +923,7 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, } if (false == taosValidIpAndPort(srv->ip, srv->port)) { terrno = TAOS_SYSTEM_ERROR(errno); - tError("invalid ip/port, reason: %s", terrstr()); + tError("invalid ip/port, %d:%d, reason: %s", srv->ip, srv->port, terrstr()); goto End; } if (false == addHandleToAcceptloop(srv)) { @@ -981,7 +981,7 @@ void uvDestoryExHandle(void* handle) { taosMemoryFree(handle); } -void uvHandleQuit(SSrvMsg* msg, SWorkThrdObj* thrd) { +void uvHandleQuit(SSvrMsg* msg, SWorkThrdObj* thrd) { thrd->quit = true; if (QUEUE_IS_EMPTY(&thrd->conn)) { uv_walk(thrd->loop, uvWalkCb, NULL); @@ -990,8 +990,8 @@ void uvHandleQuit(SSrvMsg* msg, SWorkThrdObj* thrd) { } taosMemoryFree(msg); } -void uvHandleRelease(SSrvMsg* msg, SWorkThrdObj* thrd) { - SSrvConn* conn = msg->pConn; +void uvHandleRelease(SSvrMsg* msg, 
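The SSvrConn lifetime in these hunks is reference-counted: T_REF_INC for every holder, T_REF_DEC on release, and destroyConn once the count reaches zero (destroyAllConn force-drains extra references first). A minimal single-threaded version of the pattern with a hypothetical struct:

```c
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int ref;
  int id;
} Conn;

static void unref(Conn *c) {
  if (c == NULL) return;
  int ref = --c->ref; /* T_REF_DEC */
  printf("conn %d ref count: %d\n", c->id, ref);
  if (ref == 0) { /* last holder gone: destroyConn(conn, true) */
    printf("conn %d destroyed\n", c->id);
    free(c);
  }
}

int main(void) {
  Conn *c = calloc(1, sizeof(*c));
  c->id  = 1;
  c->ref = 2; /* e.g. event loop + application handle */
  unref(c);   /* app releases: conn survives */
  unref(c);   /* loop releases: conn destroyed */
  return 0;
}
```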
SWorkThrdObj* thrd) { + SSvrConn* conn = msg->pConn; if (conn->status == ConnAcquire) { reallocConnRefHandle(conn); if (!transQueuePush(&conn->srvMsgs, msg)) { @@ -1004,13 +1004,13 @@ void uvHandleRelease(SSrvMsg* msg, SWorkThrdObj* thrd) { } destroySmsg(msg); } -void uvHandleResp(SSrvMsg* msg, SWorkThrdObj* thrd) { +void uvHandleResp(SSvrMsg* msg, SWorkThrdObj* thrd) { // send msg to client tDebug("server conn %p start to send resp (2/2)", msg->pConn); uvStartSendResp(msg); } -void uvHandleRegister(SSrvMsg* msg, SWorkThrdObj* thrd) { - SSrvConn* conn = msg->pConn; +void uvHandleRegister(SSvrMsg* msg, SWorkThrdObj* thrd) { + SSvrConn* conn = msg->pConn; tDebug("server conn %p register brokenlink callback", conn); if (conn->status == ConnAcquire) { if (!transQueuePush(&conn->srvMsgs, msg)) { @@ -1036,12 +1036,13 @@ void destroyWorkThrd(SWorkThrdObj* pThrd) { } taosThreadJoin(pThrd->thread, NULL); SRV_RELEASE_UV(pThrd->loop); + TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SSvrMsg, destroySmsg); transDestroyAsyncPool(pThrd->asyncPool); taosMemoryFree(pThrd->loop); taosMemoryFree(pThrd); } void sendQuitToWorkThrd(SWorkThrdObj* pThrd) { - SSrvMsg* msg = taosMemoryCalloc(1, sizeof(SSrvMsg)); + SSvrMsg* msg = taosMemoryCalloc(1, sizeof(SSvrMsg)); msg->type = Quit; tDebug("server send quit msg to work thread"); transSendAsync(pThrd->asyncPool, &msg->q); @@ -1074,8 +1075,8 @@ void transCloseServer(void* arg) { taosMemoryFree(srv); - transSrvInst--; - if (transSrvInst == 0) { + tranSSvrInst--; + if (tranSSvrInst == 0) { TdThreadOnce tmpInit = PTHREAD_ONCE_INIT; memcpy(&transModuleInit, &tmpInit, sizeof(TdThreadOnce)); uvCloseExHandleMgt(); @@ -1086,7 +1087,7 @@ void transRefSrvHandle(void* handle) { if (handle == NULL) { return; } - int ref = T_REF_INC((SSrvConn*)handle); + int ref = T_REF_INC((SSvrConn*)handle); tDebug("server conn %p ref count: %d", handle, ref); } @@ -1094,10 +1095,10 @@ void transUnrefSrvHandle(void* handle) { if (handle == NULL) { return; } - int ref = T_REF_DEC((SSrvConn*)handle); + int ref = T_REF_DEC((SSvrConn*)handle); tDebug("server conn %p ref count: %d", handle, ref); if (ref == 0) { - destroyConn((SSrvConn*)handle, true); + destroyConn((SSvrConn*)handle, true); } } @@ -1112,12 +1113,12 @@ void transReleaseSrvHandle(void* handle) { STransMsg tmsg = {.code = 0, .info.handle = exh, .info.ahandle = NULL, .info.refId = refId}; - SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg)); - srvMsg->msg = tmsg; - srvMsg->type = Release; + SSvrMsg* m = taosMemoryCalloc(1, sizeof(SSvrMsg)); + m->msg = tmsg; + m->type = Release; tTrace("server conn %p start to release", exh->handle); - transSendAsync(pThrd->asyncPool, &srvMsg->q); + transSendAsync(pThrd->asyncPool, &m->q); uvReleaseExHandle(refId); return; _return1: @@ -1140,11 +1141,11 @@ void transSendResponse(const STransMsg* msg) { SWorkThrdObj* pThrd = exh->pThrd; ASYNC_ERR_JRET(pThrd); - SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg)); - srvMsg->msg = tmsg; - srvMsg->type = Normal; + SSvrMsg* m = taosMemoryCalloc(1, sizeof(SSvrMsg)); + m->msg = tmsg; + m->type = Normal; tDebug("server conn %p start to send resp (1/2)", exh->handle); - transSendAsync(pThrd->asyncPool, &srvMsg->q); + transSendAsync(pThrd->asyncPool, &m->q); uvReleaseExHandle(refId); return; _return1: @@ -1168,11 +1169,11 @@ void transRegisterMsg(const STransMsg* msg) { SWorkThrdObj* pThrd = exh->pThrd; ASYNC_ERR_JRET(pThrd); - SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg)); - srvMsg->msg = tmsg; - srvMsg->type = Register; + SSvrMsg* m = 
taosMemoryCalloc(1, sizeof(SSvrMsg)); + m->msg = tmsg; + m->type = Register; tTrace("server conn %p start to register brokenlink callback", exh->handle); - transSendAsync(pThrd->asyncPool, &srvMsg->q); + transSendAsync(pThrd->asyncPool, &m->q); uvReleaseExHandle(refId); return; @@ -1192,7 +1193,7 @@ int transGetConnInfo(void* thandle, STransHandleInfo* pInfo) { return -1; } SExHandle* ex = thandle; - SSrvConn* pConn = ex->handle; + SSvrConn* pConn = ex->handle; struct sockaddr_in addr = pConn->addr; pInfo->clientIp = (uint32_t)(addr.sin_addr.s_addr); diff --git a/source/libs/transport/test/CMakeLists.txt b/source/libs/transport/test/CMakeLists.txt index 98a252e008d85b27206fa58055f757dd02d64a78..468b70fb711a15a83c97a5a45adb68dee3d1c368 100644 --- a/source/libs/transport/test/CMakeLists.txt +++ b/source/libs/transport/test/CMakeLists.txt @@ -111,10 +111,12 @@ target_link_libraries (pushServer ) -add_test( - NAME transUT - COMMAND transUT -) +if(NOT TD_WINDOWS) + add_test( + NAME transUT + COMMAND transUT + ) +endif(NOT TD_WINDOWS) add_test( NAME transUtilUt COMMAND transportTest diff --git a/source/libs/transport/test/rclient.c b/source/libs/transport/test/rclient.c index eea76096ffa6a96e0e8f4ce02e4cb6bf7b6eeb41..55e6dd000a1270889894fe28f0a024164e5255eb 100644 --- a/source/libs/transport/test/rclient.c +++ b/source/libs/transport/test/rclient.c @@ -161,7 +161,7 @@ int main(int argc, char *argv[]) { } } - const char *path = "/tmp/transport/client"; + const char *path = TD_TMP_DIR_PATH "transport/client"; taosRemoveDir(path); taosMkDir(path); tstrncpy(tsLogDir, path, PATH_MAX); diff --git a/source/libs/transport/test/rserver.c b/source/libs/transport/test/rserver.c index 6262b3ae4843703fd301fb8d9675b477bb1e3128..1fd78be77de7575d47e3d2ce70bf6e0908dfec8e 100644 --- a/source/libs/transport/test/rserver.c +++ b/source/libs/transport/test/rserver.c @@ -160,7 +160,7 @@ int main(int argc, char *argv[]) { tsAsyncLog = 0; rpcInit.connType = TAOS_CONN_SERVER; - const char *path = "/tmp/transport/server"; + const char *path = TD_TMP_DIR_PATH "transport/server"; taosRemoveDir(path); taosMkDir(path); tstrncpy(tsLogDir, path, PATH_MAX); diff --git a/source/libs/transport/test/transUT.cpp b/source/libs/transport/test/transUT.cpp index 3f5ef1fb53e1b0a4235b3c851ef6790f09c6c89b..25b04e769cfe248046fc8e080d1775c331ddcdcd 100644 --- a/source/libs/transport/test/transUT.cpp +++ b/source/libs/transport/test/transUT.cpp @@ -43,7 +43,7 @@ static void processResp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet); class Client { public: void Init(int nThread) { - memcpy(tsTempDir, "/tmp", strlen("/tmp")); + memcpy(tsTempDir, TD_TMP_DIR_PATH, strlen(TD_TMP_DIR_PATH)); memset(&rpcInit_, 0, sizeof(rpcInit_)); rpcInit_.localPort = 0; rpcInit_.label = (char *)label; @@ -105,7 +105,7 @@ class Client { class Server { public: Server() { - memcpy(tsTempDir, "/tmp", strlen("/tmp")); + memcpy(tsTempDir, TD_TMP_DIR_PATH, strlen(TD_TMP_DIR_PATH)); memset(&rpcInit_, 0, sizeof(rpcInit_)); memcpy(rpcInit_.localFqdn, "localhost", strlen("localhost")); @@ -219,7 +219,7 @@ static void initEnv() { tsLogEmbedded = 1; tsAsyncLog = 0; - std::string path = "/tmp/transport"; + std::string path = TD_TMP_DIR_PATH "transport"; // taosRemoveDir(path.c_str()); taosMkDir(path.c_str()); diff --git a/source/libs/transport/test/transportTests.cpp b/source/libs/transport/test/transportTests.cpp index a84bd94a00000b9a412b030e223e574a7a5b9794..6c8b30b6e4d5727bd7c0a0f8c6d850fb772262ad 100644 --- a/source/libs/transport/test/transportTests.cpp +++ 
b/source/libs/transport/test/transportTests.cpp @@ -156,80 +156,80 @@ int32_t cloneVal(void *src, void **dst) { memcpy(*dst, src, sz); return 0; } -TEST_F(TransCtxEnv, mergeTest) { - int key = 1; - { - STransCtx *src = (STransCtx *)taosMemoryCalloc(1, sizeof(STransCtx)); - transCtxInit(src); - { - STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree}; - val1.val = taosMemoryMalloc(12); - - taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1)); - key++; - } - { - STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree}; - val1.val = taosMemoryMalloc(12); - taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1)); - key++; - } - transCtxMerge(ctx, src); - taosMemoryFree(src); - } - EXPECT_EQ(2, taosHashGetSize(ctx->args)); - { - STransCtx *src = (STransCtx *)taosMemoryCalloc(1, sizeof(STransCtx)); - transCtxInit(src); - { - STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree}; - val1.val = taosMemoryMalloc(12); - - taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1)); - key++; - } - { - STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree}; - val1.val = taosMemoryMalloc(12); - taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1)); - key++; - } - transCtxMerge(ctx, src); - taosMemoryFree(src); - } - std::string val("Hello"); - EXPECT_EQ(4, taosHashGetSize(ctx->args)); - { - key = 1; - STransCtx *src = (STransCtx *)taosMemoryCalloc(1, sizeof(STransCtx)); - transCtxInit(src); - { - STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree}; - val1.val = taosMemoryCalloc(1, 11); - val1.clone = cloneVal; - memcpy(val1.val, val.c_str(), val.size()); - - taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1)); - key++; - } - { - STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree}; - val1.val = taosMemoryCalloc(1, 11); - val1.clone = cloneVal; - memcpy(val1.val, val.c_str(), val.size()); - taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1)); - key++; - } - transCtxMerge(ctx, src); - taosMemoryFree(src); - } - EXPECT_EQ(4, taosHashGetSize(ctx->args)); - - char *skey = (char *)transCtxDumpVal(ctx, 1); - EXPECT_EQ(0, strcmp(skey, val.c_str())); - taosMemoryFree(skey); - - skey = (char *)transCtxDumpVal(ctx, 2); - EXPECT_EQ(0, strcmp(skey, val.c_str())); -} +// TEST_F(TransCtxEnv, mergeTest) { +// int key = 1; +// { +// STransCtx *src = (STransCtx *)taosMemoryCalloc(1, sizeof(STransCtx)); +// transCtxInit(src); +// { +// STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree}; +// val1.val = taosMemoryMalloc(12); +// +// taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1)); +// key++; +// } +// { +// STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree}; +// val1.val = taosMemoryMalloc(12); +// taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1)); +// key++; +// } +// transCtxMerge(ctx, src); +// taosMemoryFree(src); +// } +// EXPECT_EQ(2, taosHashGetSize(ctx->args)); +// { +// STransCtx *src = (STransCtx *)taosMemoryCalloc(1, sizeof(STransCtx)); +// transCtxInit(src); +// { +// STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree}; +// val1.val = taosMemoryMalloc(12); +// +// taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1)); +// key++; +// } +// { +// STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree}; +// val1.val = taosMemoryMalloc(12); +// taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1)); +// key++; +// } +// 
transCtxMerge(ctx, src); +// taosMemoryFree(src); +// } +// std::string val("Hello"); +// EXPECT_EQ(4, taosHashGetSize(ctx->args)); +// { +// key = 1; +// STransCtx *src = (STransCtx *)taosMemoryCalloc(1, sizeof(STransCtx)); +// transCtxInit(src); +// { +// STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree}; +// val1.val = taosMemoryCalloc(1, 11); +// val1.clone = cloneVal; +// memcpy(val1.val, val.c_str(), val.size()); +// +// taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1)); +// key++; +// } +// { +// STransCtxVal val1 = {NULL, NULL, (void (*)(const void *))taosMemoryFree}; +// val1.val = taosMemoryCalloc(1, 11); +// val1.clone = cloneVal; +// memcpy(val1.val, val.c_str(), val.size()); +// taosHashPut(src->args, &key, sizeof(key), &val1, sizeof(val1)); +// key++; +// } +// transCtxMerge(ctx, src); +// taosMemoryFree(src); +// } +// EXPECT_EQ(4, taosHashGetSize(ctx->args)); +// +// char *skey = (char *)transCtxDumpVal(ctx, 1); +// EXPECT_EQ(0, strcmp(skey, val.c_str())); +// taosMemoryFree(skey); +// +// skey = (char *)transCtxDumpVal(ctx, 2); +// EXPECT_EQ(0, strcmp(skey, val.c_str())); +//} #endif diff --git a/source/libs/wal/src/walMgmt.c b/source/libs/wal/src/walMgmt.c index ada1f599f231a9a9e2092fbc68637d13e33aa8ff..71cd6de73f005cbc710caf0a22805d60495f55ee 100644 --- a/source/libs/wal/src/walMgmt.c +++ b/source/libs/wal/src/walMgmt.c @@ -14,17 +14,17 @@ */ #define _DEFAULT_SOURCE -#include "tcompare.h" #include "os.h" #include "taoserror.h" +#include "tcompare.h" #include "tref.h" #include "walInt.h" typedef struct { - int8_t stop; - int8_t inited; - uint32_t seq; - int32_t refSetId; + int8_t stop; + int8_t inited; + uint32_t seq; + int32_t refSetId; TdThread thread; } SWalMgmt; @@ -36,30 +36,42 @@ static void walFreeObj(void *pWal); int64_t walGetSeq() { return (int64_t)atomic_load_32(&tsWal.seq); } int32_t walInit() { - int8_t old = atomic_val_compare_exchange_8(&tsWal.inited, 0, 1); - if (old == 1) return 0; + int8_t old; + while (1) { + old = atomic_val_compare_exchange_8(&tsWal.inited, 0, 2); + if (old != 2) break; + } - tsWal.refSetId = taosOpenRef(TSDB_MIN_VNODES, walFreeObj); + if (old == 0) { + tsWal.refSetId = taosOpenRef(TSDB_MIN_VNODES, walFreeObj); - int32_t code = walCreateThread(); - if (code != 0) { - wError("failed to init wal module since %s", tstrerror(code)); - atomic_store_8(&tsWal.inited, 0); - return code; + int32_t code = walCreateThread(); + if (code != 0) { + wError("failed to init wal module since %s", tstrerror(code)); + atomic_store_8(&tsWal.inited, 0); + return code; + } + + wInfo("wal module is initialized, rsetId:%d", tsWal.refSetId); + atomic_store_8(&tsWal.inited, 1); } - wInfo("wal module is initialized, rsetId:%d", tsWal.refSetId); return 0; } void walCleanUp() { - int8_t old = atomic_val_compare_exchange_8(&tsWal.inited, 1, 0); - if (old == 0) { - return; + int8_t old; + while (1) { + old = atomic_val_compare_exchange_8(&tsWal.inited, 1, 2); + if (old != 2) break; + } + + if (old == 1) { + walStopThread(); + taosCloseRef(tsWal.refSetId); + wInfo("wal module is cleaned up"); + atomic_store_8(&tsWal.inited, 0); } - walStopThread(); - taosCloseRef(tsWal.refSetId); - wInfo("wal module is cleaned up"); } SWal *walOpen(const char *path, SWalCfg *pCfg) { @@ -126,7 +138,6 @@ SWal *walOpen(const char *path, SWalCfg *pCfg) { } if (walCheckAndRepairIdx(pWal) < 0) { - } wDebug("vgId:%d, wal:%p is opened, level:%d fsyncPeriod:%d", pWal->cfg.vgId, pWal, pWal->cfg.level, diff --git a/source/libs/wal/src/walWrite.c 
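The reworked walInit/walCleanUp below use a three-state guard (0 uninitialized, 1 initialized, 2 transition in progress) and spin while another thread holds state 2, so concurrent init and cleanup can no longer interleave half-finished work. The same guard expressed in portable C11 atomics, as a sketch rather than the wal module's actual code:

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_char inited = 0; /* 0 = uninit, 1 = inited, 2 = in transition */

static int moduleInit(void) {
  char expected;
  for (;;) {
    expected = 0;
    if (atomic_compare_exchange_strong(&inited, &expected, 2)) break; /* won 0 -> 2 */
    if (expected == 1) return 0; /* another thread already initialized */
    /* expected == 2: a transition is mid-flight; spin and retry */
  }
  printf("initializing wal module\n"); /* taosOpenRef + walCreateThread go here */
  atomic_store(&inited, 1);            /* publish: transition done */
  return 0;
}

int main(void) {
  moduleInit();
  moduleInit(); /* second call observes state 1 and is a no-op */
  return 0;
}
```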
b/source/libs/wal/src/walWrite.c index 2ddcb27835b5276026d46ff2d46150b7fc9995a7..d2a43c410708249983295dca44ca06f6f75a2b70 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -121,6 +121,8 @@ int32_t walRollback(SWal *pWal, int64_t ver) { pWal->vers.lastVer = ver - 1; ((SWalFileInfo *)taosArrayGetLast(pWal->fileInfoSet))->lastVer = ver - 1; ((SWalFileInfo *)taosArrayGetLast(pWal->fileInfoSet))->fileSize = entry.offset; + taosCloseFile(&pIdxTFile); + taosCloseFile(&pLogTFile); // unlock taosThreadMutexUnlock(&pWal->mutex); diff --git a/source/libs/wal/test/walMetaTest.cpp b/source/libs/wal/test/walMetaTest.cpp index 18345699b2f5fa55d8356cab7808a09837f61a2e..b1c673e87bfe702f2111c48713fd809199f2520e 100644 --- a/source/libs/wal/test/walMetaTest.cpp +++ b/source/libs/wal/test/walMetaTest.cpp @@ -37,7 +37,7 @@ class WalCleanEnv : public ::testing::Test { } SWal* pWal = NULL; - const char* pathName = "/tmp/wal_test"; + const char* pathName = TD_TMP_DIR_PATH "wal_test"; }; class WalCleanDeleteEnv : public ::testing::Test { @@ -67,7 +67,7 @@ class WalCleanDeleteEnv : public ::testing::Test { } SWal* pWal = NULL; - const char* pathName = "/tmp/wal_test"; + const char* pathName = TD_TMP_DIR_PATH "wal_test"; }; class WalKeepEnv : public ::testing::Test { @@ -104,7 +104,7 @@ class WalKeepEnv : public ::testing::Test { } SWal* pWal = NULL; - const char* pathName = "/tmp/wal_test"; + const char* pathName = TD_TMP_DIR_PATH "wal_test"; }; class WalRetentionEnv : public ::testing::Test { @@ -141,7 +141,7 @@ class WalRetentionEnv : public ::testing::Test { } SWal* pWal = NULL; - const char* pathName = "/tmp/wal_test"; + const char* pathName = TD_TMP_DIR_PATH "wal_test"; }; TEST_F(WalCleanEnv, createNew) { @@ -325,6 +325,7 @@ TEST_F(WalKeepEnv, readHandleRead) { EXPECT_EQ(newStr[j], pRead->pHead->head.body[j]); } } + walCloseReadHandle(pRead); } TEST_F(WalRetentionEnv, repairMeta1) { diff --git a/source/os/CMakeLists.txt b/source/os/CMakeLists.txt index 90b8e9dd8aca8d3ceaee32dc358225d60cf029b3..b6e131d4ccc670f0d3b35e00483f33f072a314e2 100644 --- a/source/os/CMakeLists.txt +++ b/source/os/CMakeLists.txt @@ -18,14 +18,16 @@ if(USE_TD_MEMORY) add_definitions(-DUSE_TD_MEMORY) endif () if(BUILD_ADDR2LINE) - target_include_directories( - os - PUBLIC "${TD_SOURCE_DIR}/contrib/libdwarf/src/lib/libdwarf" - ) + if(NOT TD_WINDOWS) + target_include_directories( + os + PUBLIC "${TD_SOURCE_DIR}/contrib/libdwarf/src/lib/libdwarf" + ) + target_link_libraries( + os PUBLIC addr2line dl z + ) + endif() add_definitions(-DUSE_ADDR2LINE) - target_link_libraries( - os PUBLIC addr2line dl z - ) endif () if(CHECK_STR2INT_ERROR) add_definitions(-DTD_CHECK_STR_TO_INT_ERROR) diff --git a/source/os/src/osDir.c b/source/os/src/osDir.c index 72654d008443a7e42c41a7381d0ad936a41aee2f..cfb7b8a0e255cf32301984f9135f2d4711144d74 100644 --- a/source/os/src/osDir.c +++ b/source/os/src/osDir.c @@ -91,7 +91,12 @@ void taosRemoveDir(const char *dirname) { bool taosDirExist(const char *dirname) { return taosCheckExistFile(dirname); } int32_t taosMkDir(const char *dirname) { + if (taosDirExist(dirname)) return 0; +#ifdef WINDOWS + int32_t code = _mkdir(dirname, 0755); +#else int32_t code = mkdir(dirname, 0755); +#endif if (code < 0 && errno == EEXIST) { return 0; } @@ -101,36 +106,49 @@ int32_t taosMkDir(const char *dirname) { int32_t taosMulMkDir(const char *dirname) { if (dirname == NULL) return -1; - char * temp = strdup(dirname); + char temp[1024]; char * pos = temp; int32_t code = 0; +#ifdef WINDOWS + 
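The walRollback hunk above adds taosCloseFile for the index and log handles before returning, closing what was otherwise a per-rollback descriptor leak. A sketch of the corrected open/close discipline; the paths and stdio calls are illustrative, not the wal API:

```c
#include <stdio.h>

static int rollback(const char *idxPath, const char *logPath) {
  FILE *pIdx = fopen(idxPath, "r+b");
  FILE *pLog = fopen(logPath, "r+b");
  if (pIdx == NULL || pLog == NULL) goto _err;

  /* ... truncate index and log back to the rollback point ... */

  fclose(pIdx); /* the two closes the diff adds */
  fclose(pLog);
  return 0;

_err:
  if (pIdx) fclose(pIdx);
  if (pLog) fclose(pLog);
  return -1;
}

int main(void) {
  rollback("wal.idx", "wal.log");
  return 0;
}
```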
taosRealPath(dirname, temp, sizeof(temp)); + if (temp[1] == ':') pos += 3; +#else + strcpy(temp, dirname); +#endif + + if (taosDirExist(temp)) return code; - if (strncmp(temp, "/", 1) == 0) { + if (strncmp(temp, TD_DIRSEP, 1) == 0) { pos += 1; - } else if (strncmp(temp, "./", 2) == 0) { + } else if (strncmp(temp, "." TD_DIRSEP, 2) == 0) { pos += 2; } for (; *pos != '\0'; pos++) { - if (*pos == '/') { + if (*pos == TD_DIRSEP[0]) { *pos = '\0'; + #ifdef WINDOWS + code = _mkdir(temp, 0755); + #else code = mkdir(temp, 0755); + #endif if (code < 0 && errno != EEXIST) { - free(temp); return code; } - *pos = '/'; + *pos = TD_DIRSEP[0]; } } - if (*(pos - 1) != '/') { + if (*(pos - 1) != TD_DIRSEP[0]) { + #ifdef WINDOWS + code = _mkdir(temp, 0755); + #else code = mkdir(temp, 0755); + #endif if (code < 0 && errno != EEXIST) { - free(temp); return code; } } - free(temp); // int32_t code = mkdir(dirname, 0755); if (code < 0 && errno == EEXIST) { @@ -186,7 +204,7 @@ void taosRemoveOldFiles(const char *dirname, int32_t keepDays) { int32_t taosExpandDir(const char *dirname, char *outname, int32_t maxlen) { wordexp_t full_path; if (0 != wordexp(dirname, &full_path, 0)) { - // printf("failed to expand path:%s since %s", dirname, strerror(errno)); + printf("failed to expand path:%s since %s", dirname, strerror(errno)); wordfree(&full_path); return -1; } @@ -233,7 +251,13 @@ char *taosDirName(char *name) { _splitpath(name, Drive1, Dir1, NULL, NULL); size_t dirNameLen = strlen(Drive1) + strlen(Dir1); if (dirNameLen > 0) { - name[dirNameLen] = 0; + if (name[dirNameLen - 1] == '/' || name[dirNameLen - 1] == '\\') { + name[dirNameLen - 1] = 0; + } else { + name[dirNameLen] = 0; + } + } else { + name[0] = 0; } return name; #else diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c index aa64e656382b7b9d90794785ccb0d3a4d791feee..c75cca79f6b82e2989b7199068db297c7b91a1eb 100644 --- a/source/os/src/osFile.c +++ b/source/os/src/osFile.c @@ -69,7 +69,6 @@ void taosGetTmpfilePath(const char *inputTmpDir, const char *fileNamePrefix, cha } strcpy(tmpPath + len, tdengineTmpFileNamePrefix); - strcat(tmpPath, tdengineTmpFileNamePrefix); if (strlen(tmpPath) + strlen(fileNamePrefix) + strlen("-%d-%s") < PATH_MAX) { strcat(tmpPath, fileNamePrefix); strcat(tmpPath, "-%d-%s"); @@ -109,8 +108,11 @@ void taosGetTmpfilePath(const char *inputTmpDir, const char *fileNamePrefix, cha int64_t taosCopyFile(const char *from, const char *to) { #ifdef WINDOWS - assert(0); - return -1; + if (CopyFile(from, to, 0)) { + return 1; + } else { + return -1; + } #else char buffer[4096]; int64_t size = 0; @@ -236,7 +238,7 @@ int32_t taosDevInoFile(TdFilePtr pFile, int64_t *stDev, int64_t *stIno) { void autoDelFileListAdd(const char *path) { return; } -TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions) { +TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions) { int fd = -1; FILE *fp = NULL; if (tdFileOptions & TD_FILE_STREAM) { @@ -343,7 +345,11 @@ int64_t taosReadFile(TdFilePtr pFile, void *buf, int64_t count) { char *tbuf = (char *)buf; while (leftbytes > 0) { + #ifdef WINDOWS + readbytes = _read(pFile->fd, (void *)tbuf, (uint32_t)leftbytes); + #else readbytes = read(pFile->fd, (void *)tbuf, (uint32_t)leftbytes); + #endif if (readbytes < 0) { if (errno == EINTR) { continue; @@ -379,10 +385,10 @@ int64_t taosPReadFile(TdFilePtr pFile, void *buf, int64_t count, int64_t offset) #endif assert(pFile->fd >= 0); // Please check if you have closed the file. 
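taosMulMkDir above now walks the path with TD_DIRSEP and the platform mkdir, creating each intermediate directory and treating EEXIST as success; note that MSVC's _mkdir takes only the path argument. A portable sketch of the same walk (the Windows drive-letter skip from the hunk is omitted for brevity):

```c
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#ifdef _WIN32
#include <direct.h>
#define MKDIR(p) _mkdir(p)   /* MSVC: path only, no mode */
#define DIRSEP   '\\'
#else
#define MKDIR(p) mkdir((p), 0755)
#define DIRSEP   '/'
#endif

/* mkdir -p: truncate at each separator so every prefix gets created. */
static int mulMkDir(const char *dirname) {
  char temp[1024];
  snprintf(temp, sizeof(temp), "%s", dirname);
  for (char *pos = temp + 1; *pos != '\0'; pos++) {
    if (*pos == DIRSEP) {
      *pos = '\0';
      if (MKDIR(temp) < 0 && errno != EEXIST) return -1;
      *pos = DIRSEP;
    }
  }
  if (MKDIR(temp) < 0 && errno != EEXIST) return -1; /* final component */
  return 0;
}

int main(void) {
  printf("mkdir -p style: %d\n", mulMkDir("build/tmp/wal"));
  return 0;
}
```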
#ifdef WINDOWS - size_t pos = lseek(pFile->fd, 0, SEEK_CUR); - lseek(pFile->fd, offset, SEEK_SET); - int64_t ret = read(pFile->fd, buf, count); - lseek(pFile->fd, pos, SEEK_SET); + size_t pos = _lseek(pFile->fd, 0, SEEK_CUR); + _lseek(pFile->fd, offset, SEEK_SET); + int64_t ret = _read(pFile->fd, buf, count); + _lseek(pFile->fd, pos, SEEK_SET); #else int64_t ret = pread(pFile->fd, buf, count, offset); #endif @@ -428,7 +434,11 @@ int64_t taosLSeekFile(TdFilePtr pFile, int64_t offset, int32_t whence) { taosThreadRwlockRdlock(&(pFile->rwlock)); #endif assert(pFile->fd >= 0);  // Please check if you have closed the file. +#ifdef WINDOWS + int64_t ret = _lseek(pFile->fd, offset, whence); +#else int64_t ret = lseek(pFile->fd, offset, whence); +#endif #if FILE_WITH_LOCK taosThreadRwlockUnlock(&(pFile->rwlock)); #endif @@ -567,12 +577,12 @@ int64_t taosFSendFile(TdFilePtr pFileOut, TdFilePtr pFileIn, int64_t *offset, in #ifdef WINDOWS - lseek(pFileIn->fd, (int32_t)(*offset), 0); + _lseek(pFileIn->fd, (int32_t)(*offset), 0); int64_t writeLen = 0; uint8_t buffer[_SEND_FILE_STEP_] = {0}; for (int64_t len = 0; len < (size - _SEND_FILE_STEP_); len += _SEND_FILE_STEP_) { - size_t rlen = read(pFileIn->fd, (void *)buffer, _SEND_FILE_STEP_); + size_t rlen = _read(pFileIn->fd, (void *)buffer, _SEND_FILE_STEP_); if (rlen <= 0) { return writeLen; } else if (rlen < _SEND_FILE_STEP_) { @@ -586,7 +596,7 @@ int64_t taosFSendFile(TdFilePtr pFileOut, TdFilePtr pFileIn, int64_t *offset, in int64_t remain = size - writeLen; if (remain > 0) { - size_t rlen = read(pFileIn->fd, (void *)buffer, (size_t)remain); + size_t rlen = _read(pFileIn->fd, (void *)buffer, (size_t)remain); if (rlen <= 0) { return writeLen; } else { diff --git a/source/os/src/osMemory.c b/source/os/src/osMemory.c index e3791af618d341d1cd08a5fc46c9d62055be2e13..24bc9d0b4c1c8b73f4b70d2dd3c78d4fa178d06b 100644 --- a/source/os/src/osMemory.c +++ b/source/os/src/osMemory.c @@ -37,6 +37,49 @@ typedef struct TdMemoryInfo { #ifdef WINDOWS #define tstrdup(str) _strdup(str) + +int32_t taosBackTrace(void **buffer, int32_t size) { + int32_t frame = 0; + return frame; +} + +#ifdef USE_ADDR2LINE +#include <dbghelp.h> +#pragma comment(lib, "dbghelp.lib") + +void taosPrintBackTrace() { + #define MAX_STACK_FRAMES 20 + + void *pStack[MAX_STACK_FRAMES]; + + HANDLE process = GetCurrentProcess(); + SymInitialize(process, NULL, TRUE); + WORD frames = CaptureStackBackTrace(1, MAX_STACK_FRAMES, pStack, NULL); + + char buf_tmp[1024]; + for (WORD i = 0; i < frames; ++i) { + DWORD64 address = (DWORD64)(pStack[i]); + + DWORD64 displacementSym = 0; + char buffer[sizeof(SYMBOL_INFO) + MAX_SYM_NAME * sizeof(TCHAR)]; + PSYMBOL_INFO pSymbol = (PSYMBOL_INFO)buffer; + pSymbol->SizeOfStruct = sizeof(SYMBOL_INFO); + pSymbol->MaxNameLen = MAX_SYM_NAME; + + DWORD displacementLine = 0; + IMAGEHLP_LINE64 line; + //SymSetOptions(SYMOPT_LOAD_LINES); + line.SizeOfStruct = sizeof(IMAGEHLP_LINE64); + + if (SymFromAddr(process, address, &displacementSym, pSymbol) && SymGetLineFromAddr64(process, address, &displacementLine, &line)) { + snprintf(buf_tmp,sizeof(buf_tmp),"BackTrace %08" PRId64 " %s:%d %s\n", taosGetSelfPthreadId(), line.FileName, line.LineNumber, pSymbol->Name); + } else { + snprintf(buf_tmp,sizeof(buf_tmp),"BackTrace error: %d\n",GetLastError()); + } + write(1,buf_tmp,strlen(buf_tmp)); + } +} +#endif #else #define tstrdup(str) strdup(str) diff --git a/source/os/src/osSemaphore.c b/source/os/src/osSemaphore.c index 7df4c26afd8b5e08aede623522bf079605445ae6..3b68073c7eba39fbb5434144d06757507f37a559
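The taosPReadFile change above moves to the underscore-prefixed CRT calls, but the Windows branch is still an emulation of pread: save the offset, seek, read, seek back. Unlike POSIX pread, that sequence is not atomic, which is why the FILE_WITH_LOCK builds wrap it in pFile->rwlock. A standalone sketch of the emulation, with preadWin as a hypothetical name:

#include <io.h>       /* _lseek, _read on the Windows CRT */
#include <stdint.h>
#include <stdio.h>    /* SEEK_SET, SEEK_CUR */

/* Emulate pread(fd, buf, count, offset).  The descriptor's file position
 * is disturbed and then restored, so callers must serialize access to
 * the same fd. */
static int64_t preadWin(int fd, void *buf, unsigned int count, long offset) {
  long pos = _lseek(fd, 0, SEEK_CUR);              /* remember the position */
  if (pos < 0) return -1;
  if (_lseek(fd, offset, SEEK_SET) < 0) return -1;
  int64_t ret = _read(fd, buf, count);
  _lseek(fd, pos, SEEK_SET);                       /* put it back */
  return ret;
}
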
100644 --- a/source/os/src/osSemaphore.c +++ b/source/os/src/osSemaphore.c @@ -50,10 +50,17 @@ int32_t taosGetAppName(char* name, int32_t* len) { if (sub != NULL) { *sub = '\0'; } - strcpy(name, filepath); + char* end = strrchr(filepath, TD_DIRSEP[0]); + if (end == NULL) { + end = filepath; + } else { + end += 1;  // skip the separator itself so only the basename is copied + } + + strcpy(name, end); if (len != NULL) { - *len = (int32_t)strlen(filepath); + *len = (int32_t)strlen(end); } return 0; @@ -68,9 +75,32 @@ int32_t tsem_wait(tsem_t* sem) { } int32_t tsem_timewait(tsem_t* sem, int64_t nanosecs) { - int ret = 0; - - return ret; + struct timespec ts, rel; + FILETIME ft_before, ft_after; + int rc; + + rel.tv_sec = nanosecs / 1000000000; + rel.tv_nsec = nanosecs % 1000000000; + + GetSystemTimeAsFileTime(&ft_before); + errno = 0; + rc = sem_timedwait(sem, pthread_win32_getabstime_np(&ts, &rel)); + + if (rc == 0) { + return 0;  // acquired the semaphore before the deadline + } + GetSystemTimeAsFileTime(&ft_after); + // We specified a non-zero wait. Time must advance. + if (ft_before.dwLowDateTime == ft_after.dwLowDateTime && ft_before.dwHighDateTime == ft_after.dwHighDateTime) + { + printf("nanoseconds: %" PRId64 ", rc: %d, errno: %d. before filetime: %d, %d; after filetime: %d, %d\n", + nanosecs, rc, errno, + (int)ft_before.dwLowDateTime, (int)ft_before.dwHighDateTime, + (int)ft_after.dwLowDateTime, (int)ft_after.dwHighDateTime); + printf("time must advance during sem_timedwait."); + return 1; + } + return 0; } #elif defined(_TD_DARWIN_64) diff --git a/source/os/src/osSocket.c b/source/os/src/osSocket.c index 105acb188a4236b6889cfb1c26d08479ea32f387..4a0d9e286629dfb4c788eb489ab41f9c6802d831 100644 --- a/source/os/src/osSocket.c +++ b/source/os/src/osSocket.c @@ -718,7 +718,11 @@ bool taosValidIpAndPort(uint32_t ip, uint16_t port) { bzero((char *)&serverAdd, sizeof(serverAdd)); serverAdd.sin_family = AF_INET; +#ifdef WINDOWS + serverAdd.sin_addr.s_addr = INADDR_ANY; +#else serverAdd.sin_addr.s_addr = ip; +#endif serverAdd.sin_port = (uint16_t)htons(port); if ((fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) <= 2) { @@ -882,6 +886,16 @@ void taosBlockSIGPIPE() { } uint32_t taosGetIpv4FromFqdn(const char *fqdn) { +#ifdef WINDOWS + // Initialize Winsock + WSADATA wsaData; + int iResult; + iResult = WSAStartup(MAKEWORD(2, 2), &wsaData); + if (iResult != 0) { + // printf("WSAStartup failed: %d\n", iResult); + return 0xFFFFFFFF; + } +#endif struct addrinfo hints = {0}; hints.ai_family = AF_INET; hints.ai_socktype = SOCK_STREAM; @@ -899,12 +913,12 @@ uint32_t taosGetIpv4FromFqdn(const char *fqdn) { } else { #ifdef EAI_SYSTEM if (ret == EAI_SYSTEM) { - // printf("failed to get the ip address, fqdn:%s, since:%s", fqdn, strerror(errno)); + // printf("failed to get the ip address, fqdn:%s, errno:%d, since:%s", fqdn, errno, strerror(errno)); } else { - // printf("failed to get the ip address, fqdn:%s, since:%s", fqdn, gai_strerror(ret)); + // printf("failed to get the ip address, fqdn:%s, ret:%d, since:%s", fqdn, ret, gai_strerror(ret)); } #else - // printf("failed to get the ip address, fqdn:%s, since:%s", fqdn, gai_strerror(ret)); + // printf("failed to get the ip address, fqdn:%s, ret:%d, since:%s", fqdn, ret, gai_strerror(ret)); #endif return 0xFFFFFFFF; } @@ -914,7 +928,7 @@ int32_t taosGetFqdn(char *fqdn) { char hostname[1024]; hostname[1023] = '\0'; if (gethostname(hostname, 1023) == -1) { - printf("failed to get hostname, reason:%s", strerror(errno)); + // printf("failed to get hostname, reason:%s", strerror(errno)); assert(0); return -1; } @@ -932,7 +946,7 @@ #endif // __APPLE__ int32_t
ret = getaddrinfo(hostname, NULL, &hints, &result); if (!result) { - printf("failed to get fqdn, code:%d, reason:%s", ret, gai_strerror(ret)); + // printf("failed to get fqdn, code:%d, reason:%s", ret, gai_strerror(ret)); assert(0); return -1; } @@ -979,9 +993,7 @@ void tinet_ntoa(char *ipstr, uint32_t ip) { sprintf(ipstr, "%d.%d.%d.%d", ip & 0xFF, (ip >> 8) & 0xFF, (ip >> 16) & 0xFF, ip >> 24); } -void taosIgnSIGPIPE() { - signal(SIGPIPE, SIG_IGN); -} +void taosIgnSIGPIPE() { signal(SIGPIPE, SIG_IGN); } void taosSetMaskSIGPIPE() { #ifdef WINDOWS diff --git a/source/os/src/osTimezone.c b/source/os/src/osTimezone.c index 872d8e740c651cd494efbe5c783390637e95efe7..dc9527c2f2a052cf2d87b6b6c374019139390beb 100644 --- a/source/os/src/osTimezone.c +++ b/source/os/src/osTimezone.c @@ -32,6 +32,700 @@ #pragma warning(disable : 4091) #include #pragma warning(pop) + +char *win_tz[139][2]={{"China Standard Time", "Asia/Shanghai"}, + {"AUS Central Standard Time", "Australia/Darwin"}, + {"AUS Eastern Standard Time", "Australia/Sydney"}, + {"Afghanistan Standard Time", "Asia/Kabul"}, + {"Alaskan Standard Time", "America/Anchorage"}, + {"Aleutian Standard Time", "America/Adak"}, + {"Altai Standard Time", "Asia/Barnaul"}, + {"Arab Standard Time", "Asia/Riyadh"}, + {"Arabian Standard Time", "Asia/Dubai"}, + {"Arabic Standard Time", "Asia/Baghdad"}, + {"Argentina Standard Time", "America/Buenos_Aires"}, + {"Astrakhan Standard Time", "Europe/Astrakhan"}, + {"Atlantic Standard Time", "America/Halifax"}, + {"Aus Central W. Standard Time", "Australia/Eucla"}, + {"Azerbaijan Standard Time", "Asia/Baku"}, + {"Azores Standard Time", "Atlantic/Azores"}, + {"Bahia Standard Time", "America/Bahia"}, + {"Bangladesh Standard Time", "Asia/Dhaka"}, + {"Belarus Standard Time", "Europe/Minsk"}, + {"Bougainville Standard Time", "Pacific/Bougainville"}, + {"Canada Central Standard Time", "America/Regina"}, + {"Cape Verde Standard Time", "Atlantic/Cape_Verde"}, + {"Caucasus Standard Time", "Asia/Yerevan"}, + {"Cen. Australia Standard Time", "Australia/Adelaide"}, + {"Central America Standard Time", "America/Guatemala"}, + {"Central Asia Standard Time", "Asia/Almaty"}, + {"Central Brazilian Standard Time", "America/Cuiaba"}, + {"Central Europe Standard Time", "Europe/Budapest"}, + {"Central European Standard Time", "Europe/Warsaw"}, + {"Central Pacific Standard Time", "Pacific/Guadalcanal"}, + {"Central Standard Time", "America/Chicago"}, + {"Central Standard Time (Mexico)", "America/Mexico_City"}, + {"Chatham Islands Standard Time", "Pacific/Chatham"}, + {"Cuba Standard Time", "America/Havana"}, + {"Dateline Standard Time", "Etc/GMT+12"}, + {"E. Africa Standard Time", "Africa/Nairobi"}, + {"E. Australia Standard Time", "Australia/Brisbane"}, + {"E. Europe Standard Time", "Europe/Chisinau"}, + {"E. 
South America Standard Time", "America/Sao_Paulo"}, + {"Easter Island Standard Time", "Pacific/Easter"}, + {"Eastern Standard Time", "America/New_York"}, + {"Eastern Standard Time (Mexico)", "America/Cancun"}, + {"Egypt Standard Time", "Africa/Cairo"}, + {"Ekaterinburg Standard Time", "Asia/Yekaterinburg"}, + {"FLE Standard Time", "Europe/Kiev"}, + {"Fiji Standard Time", "Pacific/Fiji"}, + {"GMT Standard Time", "Europe/London"}, + {"GTB Standard Time", "Europe/Bucharest"}, + {"Georgian Standard Time", "Asia/Tbilisi"}, + {"Greenland Standard Time", "America/Godthab"}, + {"Greenwich Standard Time", "Atlantic/Reykjavik"}, + {"Haiti Standard Time", "America/Port-au-Prince"}, + {"Hawaiian Standard Time", "Pacific/Honolulu"}, + {"India Standard Time", "Asia/Calcutta"}, + {"Iran Standard Time", "Asia/Tehran"}, + {"Israel Standard Time", "Asia/Jerusalem"}, + {"Jordan Standard Time", "Asia/Amman"}, + {"Kaliningrad Standard Time", "Europe/Kaliningrad"}, + {"Korea Standard Time", "Asia/Seoul"}, + {"Libya Standard Time", "Africa/Tripoli"}, + {"Line Islands Standard Time", "Pacific/Kiritimati"}, + {"Lord Howe Standard Time", "Australia/Lord_Howe"}, + {"Magadan Standard Time", "Asia/Magadan"}, + {"Magallanes Standard Time", "America/Punta_Arenas"}, + {"Marquesas Standard Time", "Pacific/Marquesas"}, + {"Mauritius Standard Time", "Indian/Mauritius"}, + {"Middle East Standard Time", "Asia/Beirut"}, + {"Montevideo Standard Time", "America/Montevideo"}, + {"Morocco Standard Time", "Africa/Casablanca"}, + {"Mountain Standard Time", "America/Denver"}, + {"Mountain Standard Time (Mexico)", "America/Chihuahua"}, + {"Myanmar Standard Time", "Asia/Rangoon"}, + {"N. Central Asia Standard Time", "Asia/Novosibirsk"}, + {"Namibia Standard Time", "Africa/Windhoek"}, + {"Nepal Standard Time", "Asia/Katmandu"}, + {"New Zealand Standard Time", "Pacific/Auckland"}, + {"Newfoundland Standard Time", "America/St_Johns"}, + {"Norfolk Standard Time", "Pacific/Norfolk"}, + {"North Asia East Standard Time", "Asia/Irkutsk"}, + {"North Asia Standard Time", "Asia/Krasnoyarsk"}, + {"North Korea Standard Time", "Asia/Pyongyang"}, + {"Omsk Standard Time", "Asia/Omsk"}, + {"Pacific SA Standard Time", "America/Santiago"}, + {"Pacific Standard Time", "America/Los_Angeles"}, + {"Pacific Standard Time (Mexico)", "America/Tijuana"}, + {"Pakistan Standard Time", "Asia/Karachi"}, + {"Paraguay Standard Time", "America/Asuncion"}, + {"Qyzylorda Standard Time", "Asia/Qyzylorda"}, + {"Romance Standard Time", "Europe/Paris"}, + {"Russia Time Zone 10", "Asia/Srednekolymsk"}, + {"Russia Time Zone 11", "Asia/Kamchatka"}, + {"Russia Time Zone 3", "Europe/Samara"}, + {"Russian Standard Time", "Europe/Moscow"}, + {"SA Eastern Standard Time", "America/Cayenne"}, + {"SA Pacific Standard Time", "America/Bogota"}, + {"SA Western Standard Time", "America/La_Paz"}, + {"SE Asia Standard Time", "Asia/Bangkok"}, + {"Saint Pierre Standard Time", "America/Miquelon"}, + {"Sakhalin Standard Time", "Asia/Sakhalin"}, + {"Samoa Standard Time", "Pacific/Apia"}, + {"Sao Tome Standard Time", "Africa/Sao_Tome"}, + {"Saratov Standard Time", "Europe/Saratov"}, + {"Singapore Standard Time", "Asia/Singapore"}, + {"South Africa Standard Time", "Africa/Johannesburg"}, + {"South Sudan Standard Time", "Africa/Juba"}, + {"Sri Lanka Standard Time", "Asia/Colombo"}, + {"Sudan Standard Time", "Africa/Khartoum"}, + {"Syria Standard Time", "Asia/Damascus"}, + {"Taipei Standard Time", "Asia/Taipei"}, + {"Tasmania Standard Time", "Australia/Hobart"}, + {"Tocantins Standard Time", 
"America/Araguaina"}, + {"Tokyo Standard Time", "Asia/Tokyo"}, + {"Tomsk Standard Time", "Asia/Tomsk"}, + {"Tonga Standard Time", "Pacific/Tongatapu"}, + {"Transbaikal Standard Time", "Asia/Chita"}, + {"Turkey Standard Time", "Europe/Istanbul"}, + {"Turks And Caicos Standard Time", "America/Grand_Turk"}, + {"US Eastern Standard Time", "America/Indianapolis"}, + {"US Mountain Standard Time", "America/Phoenix"}, + {"UTC", "Etc/UTC"}, + {"UTC+12", "Etc/GMT-12"}, + {"UTC+13", "Etc/GMT-13"}, + {"UTC-02", "Etc/GMT+2"}, + {"UTC-08", "Etc/GMT+8"}, + {"UTC-09", "Etc/GMT+9"}, + {"UTC-11", "Etc/GMT+11"}, + {"Ulaanbaatar Standard Time", "Asia/Ulaanbaatar"}, + {"Venezuela Standard Time", "America/Caracas"}, + {"Vladivostok Standard Time", "Asia/Vladivostok"}, + {"Volgograd Standard Time", "Europe/Volgograd"}, + {"W. Australia Standard Time", "Australia/Perth"}, + {"W. Central Africa Standard Time", "Africa/Lagos"}, + {"W. Europe Standard Time", "Europe/Berlin"}, + {"W. Mongolia Standard Time", "Asia/Hovd"}, + {"West Asia Standard Time", "Asia/Tashkent"}, + {"West Bank Standard Time", "Asia/Hebron"}, + {"West Pacific Standard Time", "Pacific/Port_Moresby"}, + {"Yakutsk Standard Time", "Asia/Yakutsk"}, + {"Yukon Standard Time", "America/Whitehorse"}}; +char *tz_win[554][2]={{"Asia/Shanghai", "China Standard Time"}, +{"Africa/Abidjan", "Greenwich Standard Time"}, +{"Africa/Accra", "Greenwich Standard Time"}, +{"Africa/Addis_Ababa", "E. Africa Standard Time"}, +{"Africa/Algiers", "W. Central Africa Standard Time"}, +{"Africa/Asmera", "E. Africa Standard Time"}, +{"Africa/Bamako", "Greenwich Standard Time"}, +{"Africa/Bangui", "W. Central Africa Standard Time"}, +{"Africa/Banjul", "Greenwich Standard Time"}, +{"Africa/Bissau", "Greenwich Standard Time"}, +{"Africa/Blantyre", "South Africa Standard Time"}, +{"Africa/Brazzaville", "W. Central Africa Standard Time"}, +{"Africa/Bujumbura", "South Africa Standard Time"}, +{"Africa/Cairo", "Egypt Standard Time"}, +{"Africa/Casablanca", "Morocco Standard Time"}, +{"Africa/Ceuta", "Romance Standard Time"}, +{"Africa/Conakry", "Greenwich Standard Time"}, +{"Africa/Dakar", "Greenwich Standard Time"}, +{"Africa/Dar_es_Salaam", "E. Africa Standard Time"}, +{"Africa/Djibouti", "E. Africa Standard Time"}, +{"Africa/Douala", "W. Central Africa Standard Time"}, +{"Africa/El_Aaiun", "Morocco Standard Time"}, +{"Africa/Freetown", "Greenwich Standard Time"}, +{"Africa/Gaborone", "South Africa Standard Time"}, +{"Africa/Harare", "South Africa Standard Time"}, +{"Africa/Johannesburg", "South Africa Standard Time"}, +{"Africa/Juba", "South Sudan Standard Time"}, +{"Africa/Kampala", "E. Africa Standard Time"}, +{"Africa/Khartoum", "Sudan Standard Time"}, +{"Africa/Kigali", "South Africa Standard Time"}, +{"Africa/Kinshasa", "W. Central Africa Standard Time"}, +{"Africa/Lagos", "W. Central Africa Standard Time"}, +{"Africa/Libreville", "W. Central Africa Standard Time"}, +{"Africa/Lome", "Greenwich Standard Time"}, +{"Africa/Luanda", "W. Central Africa Standard Time"}, +{"Africa/Lubumbashi", "South Africa Standard Time"}, +{"Africa/Lusaka", "South Africa Standard Time"}, +{"Africa/Malabo", "W. Central Africa Standard Time"}, +{"Africa/Maputo", "South Africa Standard Time"}, +{"Africa/Maseru", "South Africa Standard Time"}, +{"Africa/Mbabane", "South Africa Standard Time"}, +{"Africa/Mogadishu", "E. Africa Standard Time"}, +{"Africa/Monrovia", "Greenwich Standard Time"}, +{"Africa/Nairobi", "E. Africa Standard Time"}, +{"Africa/Ndjamena", "W. 
Central Africa Standard Time"}, +{"Africa/Niamey", "W. Central Africa Standard Time"}, +{"Africa/Nouakchott", "Greenwich Standard Time"}, +{"Africa/Ouagadougou", "Greenwich Standard Time"}, +{"Africa/Porto-Novo", "W. Central Africa Standard Time"}, +{"Africa/Sao_Tome", "Sao Tome Standard Time"}, +{"Africa/Timbuktu", "Greenwich Standard Time"}, +{"Africa/Tripoli", "Libya Standard Time"}, +{"Africa/Tunis", "W. Central Africa Standard Time"}, +{"Africa/Windhoek", "Namibia Standard Time"}, +{"America/Adak", "Aleutian Standard Time"}, +{"America/Anchorage", "Alaskan Standard Time"}, +{"America/Anguilla", "SA Western Standard Time"}, +{"America/Antigua", "SA Western Standard Time"}, +{"America/Araguaina", "Tocantins Standard Time"}, +{"America/Argentina/La_Rioja", "Argentina Standard Time"}, +{"America/Argentina/Rio_Gallegos", "Argentina Standard Time"}, +{"America/Argentina/Salta", "Argentina Standard Time"}, +{"America/Argentina/San_Juan", "Argentina Standard Time"}, +{"America/Argentina/San_Luis", "Argentina Standard Time"}, +{"America/Argentina/Tucuman", "Argentina Standard Time"}, +{"America/Argentina/Ushuaia", "Argentina Standard Time"}, +{"America/Aruba", "SA Western Standard Time"}, +{"America/Asuncion", "Paraguay Standard Time"}, +{"America/Atka", "Aleutian Standard Time"}, +{"America/Bahia", "Bahia Standard Time"}, +{"America/Bahia_Banderas", "Central Standard Time (Mexico)"}, +{"America/Barbados", "SA Western Standard Time"}, +{"America/Belem", "SA Eastern Standard Time"}, +{"America/Belize", "Central America Standard Time"}, +{"America/Blanc-Sablon", "SA Western Standard Time"}, +{"America/Boa_Vista", "SA Western Standard Time"}, +{"America/Bogota", "SA Pacific Standard Time"}, +{"America/Boise", "Mountain Standard Time"}, +{"America/Buenos_Aires", "Argentina Standard Time"}, +{"America/Cambridge_Bay", "Mountain Standard Time"}, +{"America/Campo_Grande", "Central Brazilian Standard Time"}, +{"America/Cancun", "Eastern Standard Time (Mexico)"}, +{"America/Caracas", "Venezuela Standard Time"}, +{"America/Catamarca", "Argentina Standard Time"}, +{"America/Cayenne", "SA Eastern Standard Time"}, +{"America/Cayman", "SA Pacific Standard Time"}, +{"America/Chicago", "Central Standard Time"}, +{"America/Chihuahua", "Mountain Standard Time (Mexico)"}, +{"America/Coral_Harbour", "SA Pacific Standard Time"}, +{"America/Cordoba", "Argentina Standard Time"}, +{"America/Costa_Rica", "Central America Standard Time"}, +{"America/Creston", "US Mountain Standard Time"}, +{"America/Cuiaba", "Central Brazilian Standard Time"}, +{"America/Curacao", "SA Western Standard Time"}, +{"America/Danmarkshavn", "Greenwich Standard Time"}, +{"America/Dawson", "Yukon Standard Time"}, +{"America/Dawson_Creek", "US Mountain Standard Time"}, +{"America/Denver", "Mountain Standard Time"}, +{"America/Detroit", "Eastern Standard Time"}, +{"America/Dominica", "SA Western Standard Time"}, +{"America/Edmonton", "Mountain Standard Time"}, +{"America/Eirunepe", "SA Pacific Standard Time"}, +{"America/El_Salvador", "Central America Standard Time"}, +{"America/Ensenada", "Pacific Standard Time (Mexico)"}, +{"America/Fort_Nelson", "US Mountain Standard Time"}, +{"America/Fortaleza", "SA Eastern Standard Time"}, +{"America/Glace_Bay", "Atlantic Standard Time"}, +{"America/Godthab", "Greenland Standard Time"}, +{"America/Goose_Bay", "Atlantic Standard Time"}, +{"America/Grand_Turk", "Turks And Caicos Standard Time"}, +{"America/Grenada", "SA Western Standard Time"}, +{"America/Guadeloupe", "SA Western Standard Time"}, 
+{"America/Guatemala", "Central America Standard Time"}, +{"America/Guayaquil", "SA Pacific Standard Time"}, +{"America/Guyana", "SA Western Standard Time"}, +{"America/Halifax", "Atlantic Standard Time"}, +{"America/Havana", "Cuba Standard Time"}, +{"America/Hermosillo", "US Mountain Standard Time"}, +{"America/Indiana/Knox", "Central Standard Time"}, +{"America/Indiana/Marengo", "US Eastern Standard Time"}, +{"America/Indiana/Petersburg", "Eastern Standard Time"}, +{"America/Indiana/Tell_City", "Central Standard Time"}, +{"America/Indiana/Vevay", "US Eastern Standard Time"}, +{"America/Indiana/Vincennes", "Eastern Standard Time"}, +{"America/Indiana/Winamac", "Eastern Standard Time"}, +{"America/Indianapolis", "US Eastern Standard Time"}, +{"America/Inuvik", "Mountain Standard Time"}, +{"America/Iqaluit", "Eastern Standard Time"}, +{"America/Jamaica", "SA Pacific Standard Time"}, +{"America/Jujuy", "Argentina Standard Time"}, +{"America/Juneau", "Alaskan Standard Time"}, +{"America/Kentucky/Monticello", "Eastern Standard Time"}, +{"America/Knox_IN", "Central Standard Time"}, +{"America/Kralendijk", "SA Western Standard Time"}, +{"America/La_Paz", "SA Western Standard Time"}, +{"America/Lima", "SA Pacific Standard Time"}, +{"America/Los_Angeles", "Pacific Standard Time"}, +{"America/Louisville", "Eastern Standard Time"}, +{"America/Lower_Princes", "SA Western Standard Time"}, +{"America/Maceio", "SA Eastern Standard Time"}, +{"America/Managua", "Central America Standard Time"}, +{"America/Manaus", "SA Western Standard Time"}, +{"America/Marigot", "SA Western Standard Time"}, +{"America/Martinique", "SA Western Standard Time"}, +{"America/Matamoros", "Central Standard Time"}, +{"America/Mazatlan", "Mountain Standard Time (Mexico)"}, +{"America/Mendoza", "Argentina Standard Time"}, +{"America/Menominee", "Central Standard Time"}, +{"America/Merida", "Central Standard Time (Mexico)"}, +{"America/Metlakatla", "Alaskan Standard Time"}, +{"America/Mexico_City", "Central Standard Time (Mexico)"}, +{"America/Miquelon", "Saint Pierre Standard Time"}, +{"America/Moncton", "Atlantic Standard Time"}, +{"America/Monterrey", "Central Standard Time (Mexico)"}, +{"America/Montevideo", "Montevideo Standard Time"}, +{"America/Montreal", "Eastern Standard Time"}, +{"America/Montserrat", "SA Western Standard Time"}, +{"America/Nassau", "Eastern Standard Time"}, +{"America/New_York", "Eastern Standard Time"}, +{"America/Nipigon", "Eastern Standard Time"}, +{"America/Nome", "Alaskan Standard Time"}, +{"America/Noronha", "UTC-02"}, +{"America/North_Dakota/Beulah", "Central Standard Time"}, +{"America/North_Dakota/Center", "Central Standard Time"}, +{"America/North_Dakota/New_Salem", "Central Standard Time"}, +{"America/Ojinaga", "Mountain Standard Time"}, +{"America/Panama", "SA Pacific Standard Time"}, +{"America/Pangnirtung", "Eastern Standard Time"}, +{"America/Paramaribo", "SA Eastern Standard Time"}, +{"America/Phoenix", "US Mountain Standard Time"}, +{"America/Port-au-Prince", "Haiti Standard Time"}, +{"America/Port_of_Spain", "SA Western Standard Time"}, +{"America/Porto_Acre", "SA Pacific Standard Time"}, +{"America/Porto_Velho", "SA Western Standard Time"}, +{"America/Puerto_Rico", "SA Western Standard Time"}, +{"America/Punta_Arenas", "Magallanes Standard Time"}, +{"America/Rainy_River", "Central Standard Time"}, +{"America/Rankin_Inlet", "Central Standard Time"}, +{"America/Recife", "SA Eastern Standard Time"}, +{"America/Regina", "Canada Central Standard Time"}, +{"America/Resolute", "Central 
Standard Time"}, +{"America/Rio_Branco", "SA Pacific Standard Time"}, +{"America/Santa_Isabel", "Pacific Standard Time (Mexico)"}, +{"America/Santarem", "SA Eastern Standard Time"}, +{"America/Santiago", "Pacific SA Standard Time"}, +{"America/Santo_Domingo", "SA Western Standard Time"}, +{"America/Sao_Paulo", "E. South America Standard Time"}, +{"America/Scoresbysund", "Azores Standard Time"}, +{"America/Shiprock", "Mountain Standard Time"}, +{"America/Sitka", "Alaskan Standard Time"}, +{"America/St_Barthelemy", "SA Western Standard Time"}, +{"America/St_Johns", "Newfoundland Standard Time"}, +{"America/St_Kitts", "SA Western Standard Time"}, +{"America/St_Lucia", "SA Western Standard Time"}, +{"America/St_Thomas", "SA Western Standard Time"}, +{"America/St_Vincent", "SA Western Standard Time"}, +{"America/Swift_Current", "Canada Central Standard Time"}, +{"America/Tegucigalpa", "Central America Standard Time"}, +{"America/Thule", "Atlantic Standard Time"}, +{"America/Thunder_Bay", "Eastern Standard Time"}, +{"America/Tijuana", "Pacific Standard Time (Mexico)"}, +{"America/Toronto", "Eastern Standard Time"}, +{"America/Tortola", "SA Western Standard Time"}, +{"America/Vancouver", "Pacific Standard Time"}, +{"America/Virgin", "SA Western Standard Time"}, +{"America/Whitehorse", "Yukon Standard Time"}, +{"America/Winnipeg", "Central Standard Time"}, +{"America/Yakutat", "Alaskan Standard Time"}, +{"America/Yellowknife", "Mountain Standard Time"}, +{"Antarctica/Casey", "Central Pacific Standard Time"}, +{"Antarctica/Davis", "SE Asia Standard Time"}, +{"Antarctica/DumontDUrville", "West Pacific Standard Time"}, +{"Antarctica/Macquarie", "Tasmania Standard Time"}, +{"Antarctica/Mawson", "West Asia Standard Time"}, +{"Antarctica/McMurdo", "New Zealand Standard Time"}, +{"Antarctica/Palmer", "SA Eastern Standard Time"}, +{"Antarctica/Rothera", "SA Eastern Standard Time"}, +{"Antarctica/South_Pole", "New Zealand Standard Time"}, +{"Antarctica/Syowa", "E. Africa Standard Time"}, +{"Antarctica/Vostok", "Central Asia Standard Time"}, +{"Arctic/Longyearbyen", "W. 
Europe Standard Time"}, +{"Asia/Aden", "Arab Standard Time"}, +{"Asia/Almaty", "Central Asia Standard Time"}, +{"Asia/Amman", "Jordan Standard Time"}, +{"Asia/Anadyr", "Russia Time Zone 11"}, +{"Asia/Aqtau", "West Asia Standard Time"}, +{"Asia/Aqtobe", "West Asia Standard Time"}, +{"Asia/Ashgabat", "West Asia Standard Time"}, +{"Asia/Ashkhabad", "West Asia Standard Time"}, +{"Asia/Atyrau", "West Asia Standard Time"}, +{"Asia/Baghdad", "Arabic Standard Time"}, +{"Asia/Bahrain", "Arab Standard Time"}, +{"Asia/Baku", "Azerbaijan Standard Time"}, +{"Asia/Bangkok", "SE Asia Standard Time"}, +{"Asia/Barnaul", "Altai Standard Time"}, +{"Asia/Beirut", "Middle East Standard Time"}, +{"Asia/Bishkek", "Central Asia Standard Time"}, +{"Asia/Brunei", "Singapore Standard Time"}, +{"Asia/Calcutta", "India Standard Time"}, +{"Asia/Chita", "Transbaikal Standard Time"}, +{"Asia/Choibalsan", "Ulaanbaatar Standard Time"}, +{"Asia/Chongqing", "China Standard Time"}, +{"Asia/Chungking", "China Standard Time"}, +{"Asia/Colombo", "Sri Lanka Standard Time"}, +{"Asia/Dacca", "Bangladesh Standard Time"}, +{"Asia/Damascus", "Syria Standard Time"}, +{"Asia/Dhaka", "Bangladesh Standard Time"}, +{"Asia/Dili", "Tokyo Standard Time"}, +{"Asia/Dubai", "Arabian Standard Time"}, +{"Asia/Dushanbe", "West Asia Standard Time"}, +{"Asia/Famagusta", "GTB Standard Time"}, +{"Asia/Gaza", "West Bank Standard Time"}, +{"Asia/Harbin", "China Standard Time"}, +{"Asia/Hebron", "West Bank Standard Time"}, +{"Asia/Hong_Kong", "China Standard Time"}, +{"Asia/Hovd", "W. Mongolia Standard Time"}, +{"Asia/Irkutsk", "North Asia East Standard Time"}, +{"Asia/Jakarta", "SE Asia Standard Time"}, +{"Asia/Jayapura", "Tokyo Standard Time"}, +{"Asia/Jerusalem", "Israel Standard Time"}, +{"Asia/Kabul", "Afghanistan Standard Time"}, +{"Asia/Kamchatka", "Russia Time Zone 11"}, +{"Asia/Karachi", "Pakistan Standard Time"}, +{"Asia/Kashgar", "Central Asia Standard Time"}, +{"Asia/Katmandu", "Nepal Standard Time"}, +{"Asia/Khandyga", "Yakutsk Standard Time"}, +{"Asia/Krasnoyarsk", "North Asia Standard Time"}, +{"Asia/Kuala_Lumpur", "Singapore Standard Time"}, +{"Asia/Kuching", "Singapore Standard Time"}, +{"Asia/Kuwait", "Arab Standard Time"}, +{"Asia/Macao", "China Standard Time"}, +{"Asia/Macau", "China Standard Time"}, +{"Asia/Magadan", "Magadan Standard Time"}, +{"Asia/Makassar", "Singapore Standard Time"}, +{"Asia/Manila", "Singapore Standard Time"}, +{"Asia/Muscat", "Arabian Standard Time"}, +{"Asia/Nicosia", "GTB Standard Time"}, +{"Asia/Novokuznetsk", "North Asia Standard Time"}, +{"Asia/Novosibirsk", "N. 
Central Asia Standard Time"}, +{"Asia/Omsk", "Omsk Standard Time"}, +{"Asia/Oral", "West Asia Standard Time"}, +{"Asia/Phnom_Penh", "SE Asia Standard Time"}, +{"Asia/Pontianak", "SE Asia Standard Time"}, +{"Asia/Pyongyang", "North Korea Standard Time"}, +{"Asia/Qatar", "Arab Standard Time"}, +{"Asia/Qostanay", "Central Asia Standard Time"}, +{"Asia/Qyzylorda", "Qyzylorda Standard Time"}, +{"Asia/Rangoon", "Myanmar Standard Time"}, +{"Asia/Riyadh", "Arab Standard Time"}, +{"Asia/Saigon", "SE Asia Standard Time"}, +{"Asia/Sakhalin", "Sakhalin Standard Time"}, +{"Asia/Samarkand", "West Asia Standard Time"}, +{"Asia/Seoul", "Korea Standard Time"}, +{"Asia/Singapore", "Singapore Standard Time"}, +{"Asia/Srednekolymsk", "Russia Time Zone 10"}, +{"Asia/Taipei", "Taipei Standard Time"}, +{"Asia/Tashkent", "West Asia Standard Time"}, +{"Asia/Tbilisi", "Georgian Standard Time"}, +{"Asia/Tehran", "Iran Standard Time"}, +{"Asia/Tel_Aviv", "Israel Standard Time"}, +{"Asia/Thimbu", "Bangladesh Standard Time"}, +{"Asia/Thimphu", "Bangladesh Standard Time"}, +{"Asia/Tokyo", "Tokyo Standard Time"}, +{"Asia/Tomsk", "Tomsk Standard Time"}, +{"Asia/Ujung_Pandang", "Singapore Standard Time"}, +{"Asia/Ulaanbaatar", "Ulaanbaatar Standard Time"}, +{"Asia/Ulan_Bator", "Ulaanbaatar Standard Time"}, +{"Asia/Urumqi", "Central Asia Standard Time"}, +{"Asia/Ust-Nera", "Vladivostok Standard Time"}, +{"Asia/Vientiane", "SE Asia Standard Time"}, +{"Asia/Vladivostok", "Vladivostok Standard Time"}, +{"Asia/Yakutsk", "Yakutsk Standard Time"}, +{"Asia/Yekaterinburg", "Ekaterinburg Standard Time"}, +{"Asia/Yerevan", "Caucasus Standard Time"}, +{"Atlantic/Azores", "Azores Standard Time"}, +{"Atlantic/Bermuda", "Atlantic Standard Time"}, +{"Atlantic/Canary", "GMT Standard Time"}, +{"Atlantic/Cape_Verde", "Cape Verde Standard Time"}, +{"Atlantic/Faeroe", "GMT Standard Time"}, +{"Atlantic/Jan_Mayen", "W. Europe Standard Time"}, +{"Atlantic/Madeira", "GMT Standard Time"}, +{"Atlantic/Reykjavik", "Greenwich Standard Time"}, +{"Atlantic/South_Georgia", "UTC-02"}, +{"Atlantic/St_Helena", "Greenwich Standard Time"}, +{"Atlantic/Stanley", "SA Eastern Standard Time"}, +{"Australia/ACT", "AUS Eastern Standard Time"}, +{"Australia/Adelaide", "Cen. Australia Standard Time"}, +{"Australia/Brisbane", "E. Australia Standard Time"}, +{"Australia/Broken_Hill", "Cen. Australia Standard Time"}, +{"Australia/Canberra", "AUS Eastern Standard Time"}, +{"Australia/Currie", "Tasmania Standard Time"}, +{"Australia/Darwin", "AUS Central Standard Time"}, +{"Australia/Eucla", "Aus Central W. Standard Time"}, +{"Australia/Hobart", "Tasmania Standard Time"}, +{"Australia/LHI", "Lord Howe Standard Time"}, +{"Australia/Lindeman", "E. Australia Standard Time"}, +{"Australia/Lord_Howe", "Lord Howe Standard Time"}, +{"Australia/Melbourne", "AUS Eastern Standard Time"}, +{"Australia/NSW", "AUS Eastern Standard Time"}, +{"Australia/North", "AUS Central Standard Time"}, +{"Australia/Perth", "W. Australia Standard Time"}, +{"Australia/Queensland", "E. Australia Standard Time"}, +{"Australia/South", "Cen. Australia Standard Time"}, +{"Australia/Sydney", "AUS Eastern Standard Time"}, +{"Australia/Tasmania", "Tasmania Standard Time"}, +{"Australia/Victoria", "AUS Eastern Standard Time"}, +{"Australia/West", "W. Australia Standard Time"}, +{"Australia/Yancowinna", "Cen. Australia Standard Time"}, +{"Brazil/Acre", "SA Pacific Standard Time"}, +{"Brazil/DeNoronha", "UTC-02"}, +{"Brazil/East", "E. 
South America Standard Time"}, +{"Brazil/West", "SA Western Standard Time"}, +{"CST6CDT", "Central Standard Time"}, +{"Canada/Atlantic", "Atlantic Standard Time"}, +{"Canada/Central", "Central Standard Time"}, +{"Canada/Eastern", "Eastern Standard Time"}, +{"Canada/Mountain", "Mountain Standard Time"}, +{"Canada/Newfoundland", "Newfoundland Standard Time"}, +{"Canada/Pacific", "Pacific Standard Time"}, +{"Canada/Saskatchewan", "Canada Central Standard Time"}, +{"Canada/Yukon", "Yukon Standard Time"}, +{"Chile/Continental", "Pacific SA Standard Time"}, +{"Chile/EasterIsland", "Easter Island Standard Time"}, +{"Cuba", "Cuba Standard Time"}, +{"EST5EDT", "Eastern Standard Time"}, +{"Egypt", "Egypt Standard Time"}, +{"Eire", "GMT Standard Time"}, +{"Etc/GMT", "UTC"}, +{"Etc/GMT+1", "Cape Verde Standard Time"}, +{"Etc/GMT+10", "Hawaiian Standard Time"}, +{"Etc/GMT+11", "UTC-11"}, +{"Etc/GMT+12", "Dateline Standard Time"}, +{"Etc/GMT+2", "UTC-02"}, +{"Etc/GMT+3", "SA Eastern Standard Time"}, +{"Etc/GMT+4", "SA Western Standard Time"}, +{"Etc/GMT+5", "SA Pacific Standard Time"}, +{"Etc/GMT+6", "Central America Standard Time"}, +{"Etc/GMT+7", "US Mountain Standard Time"}, +{"Etc/GMT+8", "UTC-08"}, +{"Etc/GMT+9", "UTC-09"}, +{"Etc/GMT-1", "W. Central Africa Standard Time"}, +{"Etc/GMT-10", "West Pacific Standard Time"}, +{"Etc/GMT-11", "Central Pacific Standard Time"}, +{"Etc/GMT-12", "UTC+12"}, +{"Etc/GMT-13", "UTC+13"}, +{"Etc/GMT-14", "Line Islands Standard Time"}, +{"Etc/GMT-2", "South Africa Standard Time"}, +{"Etc/GMT-3", "E. Africa Standard Time"}, +{"Etc/GMT-4", "Arabian Standard Time"}, +{"Etc/GMT-5", "West Asia Standard Time"}, +{"Etc/GMT-6", "Central Asia Standard Time"}, +{"Etc/GMT-7", "SE Asia Standard Time"}, +{"Etc/GMT-8", "Singapore Standard Time"}, +{"Etc/GMT-9", "Tokyo Standard Time"}, +{"Etc/UCT", "UTC"}, +{"Etc/UTC", "UTC"}, +{"Europe/Amsterdam", "W. Europe Standard Time"}, +{"Europe/Andorra", "W. Europe Standard Time"}, +{"Europe/Astrakhan", "Astrakhan Standard Time"}, +{"Europe/Athens", "GTB Standard Time"}, +{"Europe/Belfast", "GMT Standard Time"}, +{"Europe/Belgrade", "Central Europe Standard Time"}, +{"Europe/Berlin", "W. Europe Standard Time"}, +{"Europe/Bratislava", "Central Europe Standard Time"}, +{"Europe/Brussels", "Romance Standard Time"}, +{"Europe/Bucharest", "GTB Standard Time"}, +{"Europe/Budapest", "Central Europe Standard Time"}, +{"Europe/Busingen", "W. Europe Standard Time"}, +{"Europe/Chisinau", "E. Europe Standard Time"}, +{"Europe/Copenhagen", "Romance Standard Time"}, +{"Europe/Dublin", "GMT Standard Time"}, +{"Europe/Gibraltar", "W. Europe Standard Time"}, +{"Europe/Guernsey", "GMT Standard Time"}, +{"Europe/Helsinki", "FLE Standard Time"}, +{"Europe/Isle_of_Man", "GMT Standard Time"}, +{"Europe/Istanbul", "Turkey Standard Time"}, +{"Europe/Jersey", "GMT Standard Time"}, +{"Europe/Kaliningrad", "Kaliningrad Standard Time"}, +{"Europe/Kiev", "FLE Standard Time"}, +{"Europe/Kirov", "Russian Standard Time"}, +{"Europe/Lisbon", "GMT Standard Time"}, +{"Europe/Ljubljana", "Central Europe Standard Time"}, +{"Europe/London", "GMT Standard Time"}, +{"Europe/Luxembourg", "W. Europe Standard Time"}, +{"Europe/Madrid", "Romance Standard Time"}, +{"Europe/Malta", "W. Europe Standard Time"}, +{"Europe/Mariehamn", "FLE Standard Time"}, +{"Europe/Minsk", "Belarus Standard Time"}, +{"Europe/Monaco", "W. Europe Standard Time"}, +{"Europe/Moscow", "Russian Standard Time"}, +{"Europe/Oslo", "W. 
Europe Standard Time"}, +{"Europe/Paris", "Romance Standard Time"}, +{"Europe/Podgorica", "Central Europe Standard Time"}, +{"Europe/Prague", "Central Europe Standard Time"}, +{"Europe/Riga", "FLE Standard Time"}, +{"Europe/Rome", "W. Europe Standard Time"}, +{"Europe/Samara", "Russia Time Zone 3"}, +{"Europe/San_Marino", "W. Europe Standard Time"}, +{"Europe/Sarajevo", "Central European Standard Time"}, +{"Europe/Saratov", "Saratov Standard Time"}, +{"Europe/Simferopol", "Russian Standard Time"}, +{"Europe/Skopje", "Central European Standard Time"}, +{"Europe/Sofia", "FLE Standard Time"}, +{"Europe/Stockholm", "W. Europe Standard Time"}, +{"Europe/Tallinn", "FLE Standard Time"}, +{"Europe/Tirane", "Central Europe Standard Time"}, +{"Europe/Tiraspol", "E. Europe Standard Time"}, +{"Europe/Ulyanovsk", "Astrakhan Standard Time"}, +{"Europe/Uzhgorod", "FLE Standard Time"}, +{"Europe/Vaduz", "W. Europe Standard Time"}, +{"Europe/Vatican", "W. Europe Standard Time"}, +{"Europe/Vienna", "W. Europe Standard Time"}, +{"Europe/Vilnius", "FLE Standard Time"}, +{"Europe/Volgograd", "Volgograd Standard Time"}, +{"Europe/Warsaw", "Central European Standard Time"}, +{"Europe/Zagreb", "Central European Standard Time"}, +{"Europe/Zaporozhye", "FLE Standard Time"}, +{"Europe/Zurich", "W. Europe Standard Time"}, +{"GB", "GMT Standard Time"}, +{"GB-Eire", "GMT Standard Time"}, +{"GMT+0", "UTC"}, +{"GMT-0", "UTC"}, +{"GMT0", "UTC"}, +{"Greenwich", "UTC"}, +{"Hongkong", "China Standard Time"}, +{"Iceland", "Greenwich Standard Time"}, +{"Indian/Antananarivo", "E. Africa Standard Time"}, +{"Indian/Chagos", "Central Asia Standard Time"}, +{"Indian/Christmas", "SE Asia Standard Time"}, +{"Indian/Cocos", "Myanmar Standard Time"}, +{"Indian/Comoro", "E. Africa Standard Time"}, +{"Indian/Kerguelen", "West Asia Standard Time"}, +{"Indian/Mahe", "Mauritius Standard Time"}, +{"Indian/Maldives", "West Asia Standard Time"}, +{"Indian/Mauritius", "Mauritius Standard Time"}, +{"Indian/Mayotte", "E. 
Africa Standard Time"}, +{"Indian/Reunion", "Mauritius Standard Time"}, +{"Iran", "Iran Standard Time"}, +{"Israel", "Israel Standard Time"}, +{"Jamaica", "SA Pacific Standard Time"}, +{"Japan", "Tokyo Standard Time"}, +{"Kwajalein", "UTC+12"}, +{"Libya", "Libya Standard Time"}, +{"MST7MDT", "Mountain Standard Time"}, +{"Mexico/BajaNorte", "Pacific Standard Time (Mexico)"}, +{"Mexico/BajaSur", "Mountain Standard Time (Mexico)"}, +{"Mexico/General", "Central Standard Time (Mexico)"}, +{"NZ", "New Zealand Standard Time"}, +{"NZ-CHAT", "Chatham Islands Standard Time"}, +{"Navajo", "Mountain Standard Time"}, +{"PRC", "China Standard Time"}, +{"PST8PDT", "Pacific Standard Time"}, +{"Pacific/Apia", "Samoa Standard Time"}, +{"Pacific/Auckland", "New Zealand Standard Time"}, +{"Pacific/Bougainville", "Bougainville Standard Time"}, +{"Pacific/Chatham", "Chatham Islands Standard Time"}, +{"Pacific/Easter", "Easter Island Standard Time"}, +{"Pacific/Efate", "Central Pacific Standard Time"}, +{"Pacific/Enderbury", "UTC+13"}, +{"Pacific/Fakaofo", "UTC+13"}, +{"Pacific/Fiji", "Fiji Standard Time"}, +{"Pacific/Funafuti", "UTC+12"}, +{"Pacific/Galapagos", "Central America Standard Time"}, +{"Pacific/Gambier", "UTC-09"}, +{"Pacific/Guadalcanal", "Central Pacific Standard Time"}, +{"Pacific/Guam", "West Pacific Standard Time"}, +{"Pacific/Honolulu", "Hawaiian Standard Time"}, +{"Pacific/Johnston", "Hawaiian Standard Time"}, +{"Pacific/Kiritimati", "Line Islands Standard Time"}, +{"Pacific/Kosrae", "Central Pacific Standard Time"}, +{"Pacific/Kwajalein", "UTC+12"}, +{"Pacific/Majuro", "UTC+12"}, +{"Pacific/Marquesas", "Marquesas Standard Time"}, +{"Pacific/Midway", "UTC-11"}, +{"Pacific/Nauru", "UTC+12"}, +{"Pacific/Niue", "UTC-11"}, +{"Pacific/Norfolk", "Norfolk Standard Time"}, +{"Pacific/Noumea", "Central Pacific Standard Time"}, +{"Pacific/Pago_Pago", "UTC-11"}, +{"Pacific/Palau", "Tokyo Standard Time"}, +{"Pacific/Pitcairn", "UTC-08"}, +{"Pacific/Ponape", "Central Pacific Standard Time"}, +{"Pacific/Port_Moresby", "West Pacific Standard Time"}, +{"Pacific/Rarotonga", "Hawaiian Standard Time"}, +{"Pacific/Saipan", "West Pacific Standard Time"}, +{"Pacific/Samoa", "UTC-11"}, +{"Pacific/Tahiti", "Hawaiian Standard Time"}, +{"Pacific/Tarawa", "UTC+12"}, +{"Pacific/Tongatapu", "Tonga Standard Time"}, +{"Pacific/Truk", "West Pacific Standard Time"}, +{"Pacific/Wake", "UTC+12"}, +{"Pacific/Wallis", "UTC+12"}, +{"Poland", "Central European Standard Time"}, +{"Portugal", "GMT Standard Time"}, +{"ROC", "Taipei Standard Time"}, +{"ROK", "Korea Standard Time"}, +{"Singapore", "Singapore Standard Time"}, +{"Turkey", "Turkey Standard Time"}, +{"UCT", "UTC"}, +{"US/Alaska", "Alaskan Standard Time"}, +{"US/Aleutian", "Aleutian Standard Time"}, +{"US/Arizona", "US Mountain Standard Time"}, +{"US/Central", "Central Standard Time"}, +{"US/Eastern", "Eastern Standard Time"}, +{"US/Hawaii", "Hawaiian Standard Time"}, +{"US/Indiana-Starke", "Central Standard Time"}, +{"US/Michigan", "Eastern Standard Time"}, +{"US/Mountain", "Mountain Standard Time"}, +{"US/Pacific", "Pacific Standard Time"}, +{"US/Samoa", "UTC-11"}, +{"UTC", "UTC"}, +{"Universal", "UTC"}, +{"W-SU", "Russian Standard Time"}, +{"Zulu", "UTC"}}; #elif defined(_TD_DARWIN_64) #include #include @@ -61,19 +755,33 @@ void taosSetSystemTimezone(const char *inTimezoneStr, char *outTimezoneStr, int8 #ifdef WINDOWS char winStr[TD_LOCALE_LEN * 2]; - sprintf(winStr, "TZ=%s", buf); - putenv(winStr); - tzset(); - /* - * get CURRENT time zone. 
- * system current time zone is affected by daylight saving time(DST) - * - * e.g., the local time zone of London in DST is GMT+01:00, - * otherwise is GMT+00:00 - */ + memset(winStr, 0, sizeof(winStr)); + for (size_t i = 0; i < 554; i++) { + if (strcmp(tz_win[i][0],buf) == 0) { + char keyPath[100]; + char keyValue[100]; + DWORD keyValueSize = sizeof(keyValue); + sprintf(keyPath, "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Time Zones\\%s",tz_win[i][1]); + RegGetValue(HKEY_LOCAL_MACHINE, keyPath, "Display", RRF_RT_ANY, NULL, (PVOID)&keyValue, &keyValueSize); + if (keyValueSize > 0) { + keyValue[4] = (keyValue[4] == '+' ? '-' : '+'); + keyValue[10] = 0; + sprintf(winStr, "TZ=%s:00", &(keyValue[1])); + } + break; + } + } + char *p = strchr(inTimezoneStr, '+'); + if (p == NULL) p = strchr(inTimezoneStr, '-'); + if (p == NULL) { + sprintf(winStr, "TZ=UTC+00:00:00"); + } else { + sprintf(winStr, "TZ=UTC%c%c%c:%c%c:00", (p[0] == '+' ? '-' : '+'), p[1], p[2], p[3], p[4]); + } + _putenv(winStr); + _tzset(); #ifdef _MSC_VER #if _MSC_VER >= 1900 - // see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019 int64_t timezone = _timezone; int32_t daylight = _daylight; char **tzname = _tzname; @@ -83,11 +791,6 @@ void taosSetSystemTimezone(const char *inTimezoneStr, char *outTimezoneStr, int8 int32_t tz = (int32_t)((-timezone * MILLISECOND_PER_SECOND) / MILLISECOND_PER_HOUR); *tsTimezone = tz; tz += daylight; - /* - * format: - * (CST, +0800) - * (BST, +0100) - */ sprintf(outTimezoneStr, "%s (%s, %s%02d00)", buf, tzname[daylight], tz >= 0 ? "+" : "-", abs(tz)); *outDaylight = daylight; @@ -117,14 +820,36 @@ void taosSetSystemTimezone(const char *inTimezoneStr, char *outTimezoneStr, int8 } void taosGetSystemTimezone(char *outTimezoneStr, enum TdTimezone *tsTimezone) { -#ifdef WINDOWS - char *tz = getenv("TZ"); - if (tz == NULL || strlen(tz) == 0) { +#ifdef WINDOWS + char value[100]; + DWORD bufferSize = sizeof(value); + char *buf = getenv("TZ"); + if (buf == NULL || strlen(buf) == 0) { + RegGetValue(HKEY_LOCAL_MACHINE, "SYSTEM\\CurrentControlSet\\Control\\TimeZoneInformation", "TimeZoneKeyName", RRF_RT_ANY, NULL, (PVOID)&value, &bufferSize); strcpy(outTimezoneStr, "not configured"); + if (bufferSize > 0) { + for (size_t i = 0; i < 139; i++) { + if (strcmp(win_tz[i][0],value) == 0) { + strcpy(outTimezoneStr, win_tz[i][1]); + break; + } + } + } } else { - strcpy(outTimezoneStr, tz); + strcpy(outTimezoneStr, buf); } - +#ifdef _MSC_VER +#if _MSC_VER >= 1900 + // see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019 + int64_t timezone = _timezone; + int32_t daylight = _daylight; + char **tzname = _tzname; +#endif +#endif + int32_t tz = (int32_t)((-timezone * MILLISECOND_PER_SECOND) / MILLISECOND_PER_HOUR); + *tsTimezone = tz; + tz += daylight; + sprintf(outTimezoneStr + strlen(outTimezoneStr), " (%s, %s%02d00)", tzname[daylight], tz >= 0 ? "+" : "-", abs(tz)); #elif defined(_TD_DARWIN_64) char buf[4096] = {0}; char *tz = NULL; diff --git a/source/util/src/tdigest.c b/source/util/src/tdigest.c new file mode 100644 index 0000000000000000000000000000000000000000..56b113fd8f166aae397e05ef3fed40e4df00309a --- /dev/null +++ b/source/util/src/tdigest.c @@ -0,0 +1,319 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc.
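The sign flip on keyValue[4] above is deliberate: the registry's display strings read "(UTC+08:00) ...", while a POSIX TZ value encodes the offset added to local time to reach UTC, so zones east of Greenwich carry a minus sign. A sketch of that conversion, assuming a display string of the fixed form "(UTC+hh:mm) Label"; displayToTz is a hypothetical name:

#include <stdio.h>
#include <string.h>

/* Turn "(UTC+08:00) Beijing, ..." into "TZ=UTC-08:00:00". */
static int displayToTz(const char *display, char *out, size_t outLen) {
  if (strncmp(display, "(UTC", 4) != 0) return -1;
  if (display[4] != '+' && display[4] != '-') return -1;
  char sign = (display[4] == '+') ? '-' : '+';                  /* POSIX sign is inverted */
  snprintf(out, outLen, "TZ=UTC%c%.5s:00", sign, display + 5);  /* hh:mm at offsets 5..9 */
  return 0;
}

As in the patch, a zone with no explicit offset (plain "(UTC)") falls through, and the caller defaults to "TZ=UTC+00:00:00".
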
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +/* + * src/tdigest.c + * + * Implementation of the t-digest data structure used to compute accurate percentiles. + * + * It is based on the MergingDigest implementation found at: + * https://github.com/tdunning/t-digest/blob/master/src/main/java/com/tdunning/math/stats/MergingDigest.java + * + * Copyright (c) 2016, Usman Masood + */ + +#include "os.h" +#include "osMath.h" +#include "tdigest.h" + +#define INTERPOLATE(x, x0, x1) (((x) - (x0)) / ((x1) - (x0))) +//#define INTEGRATED_LOCATION(compression, q) ((compression) * (asin(2 * (q) - 1) + M_PI / 2) / M_PI) +#define INTEGRATED_LOCATION(compression, q) ((compression) * (asin(2 * (double)(q) - 1)/M_PI + (double)1/2)) +#define FLOAT_EQ(f1, f2) (fabs((f1) - (f2)) <= FLT_EPSILON) + +typedef struct SMergeArgs { + TDigest *t; + SCentroid *centroids; + int32_t idx; + double weight_so_far; + double k1; + double min; + double max; +}SMergeArgs; + +void tdigestAutoFill(TDigest* t, int32_t compression) { + t->centroids = (SCentroid*)((char*)t + sizeof(TDigest)); + t->buffered_pts = (SPt*) ((char*)t + sizeof(TDigest) + sizeof(SCentroid) * (int32_t)GET_CENTROID(compression)); +} + +TDigest *tdigestNewFrom(void* pBuf, int32_t compression) { + memset(pBuf, 0, (size_t)TDIGEST_SIZE(compression)); + TDigest* t = (TDigest*)pBuf; + tdigestAutoFill(t, compression); + + t->compression = compression; + t->size = (int64_t)GET_CENTROID(compression); + t->threshold = (int32_t)GET_THRESHOLD(compression); + t->min = DOUBLE_MAX; + t->max = -DOUBLE_MAX; + + return t; +} + +static int32_t cmpCentroid(const void *a, const void *b) { + SCentroid *c1 = (SCentroid *) a; + SCentroid *c2 = (SCentroid *) b; + if (c1->mean < c2->mean) + return -1; + if (c1->mean > c2->mean) + return 1; + return 0; +} + + +static void mergeCentroid(SMergeArgs *args, SCentroid *merge) { + double k2; + SCentroid *c = &args->centroids[args->idx]; + + args->weight_so_far += merge->weight; + k2 = INTEGRATED_LOCATION(args->t->size, + args->weight_so_far / args->t->total_weight); + //idx++ + if(k2 - args->k1 > 1 && c->weight > 0) { + if(args->idx + 1 < args->t->size + && merge->mean != args->centroids[args->idx].mean) { + args->idx++; + } + args->k1 = k2; + } + + c = &args->centroids[args->idx]; + if(c->mean == merge->mean) { + c->weight += merge->weight; + } else { + c->weight += merge->weight; + c->mean += (merge->mean - c->mean) * merge->weight / c->weight; + + if (merge->weight > 0) { + args->min = TMIN(merge->mean, args->min); + args->max = TMAX(merge->mean, args->max); + } + } +} + +void tdigestCompress(TDigest *t) { + SCentroid *unmerged_centroids; + int64_t unmerged_weight = 0; + int32_t num_unmerged = t->num_buffered_pts; + int32_t i, j; + SMergeArgs args; + + if (t->num_buffered_pts <= 0) + return; + + unmerged_centroids = (SCentroid*)taosMemoryMalloc(sizeof(SCentroid) * t->num_buffered_pts); + for (i = 0; i < num_unmerged; i++) { + SPt *p = t->buffered_pts + i; + SCentroid *c = &unmerged_centroids[i]; + c->mean = p->value; + c->weight = p->weight; +
unmerged_weight += c->weight; + } + t->num_buffered_pts = 0; + t->total_weight += unmerged_weight; + + qsort(unmerged_centroids, num_unmerged, sizeof(SCentroid), cmpCentroid); + memset(&args, 0, sizeof(SMergeArgs)); + args.centroids = (SCentroid*)taosMemoryMalloc((size_t)(sizeof(SCentroid) * t->size)); + memset(args.centroids, 0, (size_t)(sizeof(SCentroid) * t->size)); + + args.t = t; + args.min = DOUBLE_MAX; + args.max = -DOUBLE_MAX; + + i = 0; + j = 0; + while (i < num_unmerged && j < t->num_centroids) { + SCentroid *a = &unmerged_centroids[i]; + SCentroid *b = &t->centroids[j]; + + if (a->mean <= b->mean) { + mergeCentroid(&args, a); + assert(args.idx < t->size); + i++; + } else { + mergeCentroid(&args, b); + assert(args.idx < t->size); + j++; + } + } + + while (i < num_unmerged) { + mergeCentroid(&args, &unmerged_centroids[i++]); + assert(args.idx < t->size); + } + taosMemoryFree((void*)unmerged_centroids); + + while (j < t->num_centroids) { + mergeCentroid(&args, &t->centroids[j++]); + assert(args.idx < t->size); + } + + if (t->total_weight > 0) { + t->min = TMIN(t->min, args.min); + if (args.centroids[args.idx].weight <= 0) { + args.idx--; + } + t->num_centroids = args.idx + 1; + t->max = TMAX(t->max, args.max); + } + + memcpy(t->centroids, args.centroids, sizeof(SCentroid) * t->num_centroids); + taosMemoryFree((void*)args.centroids); +} + +void tdigestAdd(TDigest* t, double x, int64_t w) { + if (w == 0) + return; + + int32_t i = t->num_buffered_pts; + if(i > 0 && t->buffered_pts[i-1].value == x ) { + t->buffered_pts[i - 1].weight += w;  // accumulate into the existing duplicate point + } else { + t->buffered_pts[i].value = x; + t->buffered_pts[i].weight = w; + t->num_buffered_pts++; + } + + + if (t->num_buffered_pts >= t->threshold) + tdigestCompress(t); +} + +double tdigestCDF(TDigest *t, double x) { + if (t == NULL) + return 0; + + int32_t i; + double left, right; + int64_t weight_so_far; + SCentroid *a, *b, tmp; + + tdigestCompress(t); + if (t->num_centroids == 0) + return NAN; + if (x < t->min) + return 0; + if (x > t->max) + return 1; + if (t->num_centroids == 1) { + if (FLOAT_EQ(t->max, t->min)) + return 0.5; + + return INTERPOLATE(x, t->min, t->max); + } + + weight_so_far = 0; + a = b = &tmp; + b->mean = t->min; + b->weight = 0; + right = 0; + + for (i = 0; i < t->num_centroids; i++) { + SCentroid *c = &t->centroids[i]; + + left = b->mean - (a->mean + right); + a = b; + b = c; + right = (b->mean - a->mean) * a->weight / (a->weight + b->weight); + + if (x < a->mean + right) { + double cdf = (weight_so_far + + a->weight + * INTERPOLATE(x, a->mean - left, a->mean + right)) + / t->total_weight; + return TMAX(cdf, 0.0); + } + + weight_so_far += a->weight; + } + + left = b->mean - (a->mean + right); + a = b; + right = t->max - a->mean; + + if (x < a->mean + right) { + return (weight_so_far + a->weight * INTERPOLATE(x, a->mean - left, a->mean + right)) + / t->total_weight; + } + + return 1; +} + +double tdigestQuantile(TDigest *t, double q) { + if (t == NULL) + return 0; + + int32_t i; + double left, right, idx; + int64_t weight_so_far; + SCentroid *a, *b, tmp; + + tdigestCompress(t); + if (t->num_centroids == 0) + return NAN; + if (t->num_centroids == 1) + return t->centroids[0].mean; + if (FLOAT_EQ(q, 0.0)) + return t->min; + if (FLOAT_EQ(q, 1.0)) + return t->max; + + idx = q * t->total_weight; + weight_so_far = 0; + b = &tmp; + b->mean = t->min; + b->weight = 0; + right = t->min; + + for (i = 0; i < t->num_centroids; i++) { + SCentroid *c = &t->centroids[i]; + a = b; + left = right; + + b = c; + right = (b->weight * a->mean +
a->weight * b->mean)/ (a->weight + b->weight); + if (idx < weight_so_far + a->weight) { + double p = (idx - weight_so_far) / a->weight; + return left * (1 - p) + right * p; + } + weight_so_far += a->weight; + } + + left = right; + a = b; + right = t->max; + + if (idx < weight_so_far + a->weight && a->weight != 0) { + double p = (idx - weight_so_far) / a->weight; + return left * (1 - p) + right * p; + } + + return t->max; +} + +void tdigestMerge(TDigest *t1, TDigest *t2) { + // SPoints + int32_t num_pts = t2->num_buffered_pts; + for(int32_t i = num_pts - 1; i >= 0; i--) { + SPt* p = t2->buffered_pts + i; + tdigestAdd(t1, p->value, p->weight); + t2->num_buffered_pts --; + } + // centroids + for (int32_t i = 0; i < t2->num_centroids; i++) { + tdigestAdd(t1, t2->centroids[i].mean, t2->centroids[i].weight); + } +} diff --git a/source/util/src/tencode.c b/source/util/src/tencode.c index fd898984ed7e411c13d55231c717c03ed7efb9ae..185daf9e45e70c86729b467563625677c32a5e07 100644 --- a/source/util/src/tencode.c +++ b/source/util/src/tencode.c @@ -29,10 +29,10 @@ struct SEncoderNode { }; struct SDecoderNode { - SDecoderNode* pNext; - const uint8_t* data; - uint32_t size; - uint32_t pos; + SDecoderNode* pNext; + uint8_t* data; + uint32_t size; + uint32_t pos; }; void tEncoderInit(SEncoder* pEncoder, uint8_t* data, uint32_t size) { @@ -52,7 +52,7 @@ void tEncoderClear(SEncoder* pCoder) { memset(pCoder, 0, sizeof(*pCoder)); } -void tDecoderInit(SDecoder* pDecoder, const uint8_t* data, uint32_t size) { +void tDecoderInit(SDecoder* pDecoder, uint8_t* data, uint32_t size) { pDecoder->data = data; pDecoder->size = size; pDecoder->pos = 0; diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 3890a55ff16da11d132261c070b809d833f1b2aa..178d6e8d2b48a5adc62b6c5d83dd414050ffa9f1 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -74,6 +74,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_REPEAT_INIT, "Repeat initialization TAOS_DEFINE_ERROR(TSDB_CODE_DUP_KEY, "Cannot add duplicate keys to hash") TAOS_DEFINE_ERROR(TSDB_CODE_NEED_RETRY, "Retry needed") TAOS_DEFINE_ERROR(TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE, "Out of memory in rpc queue") +TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_TIMESTAMP, "Invalid timestamp format") TAOS_DEFINE_ERROR(TSDB_CODE_REF_NO_MEMORY, "Ref out of memory") TAOS_DEFINE_ERROR(TSDB_CODE_REF_FULL, "too many Ref Objs") @@ -259,6 +260,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_NOT_EXIST, "Transaction not exist TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_INVALID_STAGE, "Invalid stage to kill") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_CONFLICT, "Conflict transaction not completed") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_UNKNOW_ERROR, "Unknown transaction error") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_CLOG_IS_NULL, "Transaction commitlog is null") // mnode-mq TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOPIC_ALREADY_EXIST, "Topic already exists") @@ -271,6 +273,10 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_CONSUMER_NOT_EXIST, "Consumer not exist") TAOS_DEFINE_ERROR(TSDB_CODE_MND_CONSUMER_NOT_READY, "Consumer waiting for rebalance") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOPIC_SUBSCRIBED, "Topic subscribed cannot be dropped") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_ALREADY_EXIST, "Stream already exists") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_NOT_EXIST, "Stream not exist") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_STREAM_OPTION, "Invalid stream option") + // mnode-sma TAOS_DEFINE_ERROR(TSDB_CODE_MND_SMA_ALREADY_EXIST, "SMA already exists") TAOS_DEFINE_ERROR(TSDB_CODE_MND_SMA_NOT_EXIST, "SMA does not exist") @@ -310,6 +316,7 @@ 
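The t-digest added above exposes a small surface: size a buffer with TDIGEST_SIZE(compression), initialize it with tdigestNewFrom, feed weighted samples through tdigestAdd, and read percentiles back with tdigestQuantile (buffered points are merged lazily). A usage sketch under the assumption that tdigest.h exports those names, with 300 as an arbitrary compression choice; the value actually used by the percentile function is not shown in this diff:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "tdigest.h"

int main(void) {
  int32_t compression = 300;                       /* assumed setting */
  void *buf = malloc((size_t)TDIGEST_SIZE(compression));
  if (buf == NULL) return 1;
  TDigest *t = tdigestNewFrom(buf, compression);

  for (int32_t i = 1; i <= 10000; i++) {
    tdigestAdd(t, (double)i, 1);                   /* value, weight */
  }

  printf("p50=%.1f p99=%.1f\n", tdigestQuantile(t, 0.5), tdigestQuantile(t, 0.99));
  free(buf);
  return 0;
}
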
TAOS_DEFINE_ERROR(TSDB_CODE_VND_TABLE_NOT_EXIST, "Table does not exists TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_TABLE_ACTION, "Invalid table action") TAOS_DEFINE_ERROR(TSDB_CODE_VND_COL_ALREADY_EXISTS, "Table column already exists") TAOS_DEFINE_ERROR(TSDB_CODE_VND_TABLE_COL_NOT_EXISTS, "Table column not exists") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_READ_END, "Read end") // tsdb @@ -441,9 +448,10 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QW_MSG_ERROR, "Invalid msg order") // parser TAOS_DEFINE_ERROR(TSDB_CODE_PAR_TABLE_NOT_EXIST, "Table does not exist") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_PERMISSION_DENIED, "Permission denied") +TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTERNAL_ERROR, "Parser internal error") //planner -TAOS_DEFINE_ERROR(TSDB_CODE_PLAN_INTERNAL_ERROR, "planner internal error") +TAOS_DEFINE_ERROR(TSDB_CODE_PLAN_INTERNAL_ERROR, "Planner internal error") //udf TAOS_DEFINE_ERROR(TSDB_CODE_UDF_STOPPING, "udf is stopping") diff --git a/source/util/src/thash.c b/source/util/src/thash.c index 551c3b67c8642b8bceab70c9cae75aca78f73769..f564ae45b63c0d24ac649cad4ef6ae3ecb907bcd 100644 --- a/source/util/src/thash.c +++ b/source/util/src/thash.c @@ -708,7 +708,7 @@ SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, s pNewNode->removed = 0; pNewNode->next = NULL; - memcpy(GET_HASH_NODE_DATA(pNewNode), pData, dsize); + if (pData) memcpy(GET_HASH_NODE_DATA(pNewNode), pData, dsize); memcpy(GET_HASH_NODE_KEY(pNewNode), key, keyLen); return pNewNode; @@ -774,7 +774,7 @@ static void *taosHashReleaseNode(SHashObj *pHashObj, void *p, int *slot) { ASSERT(prevNode->next != prevNode); } else { pe->next = pOld->next; - SHashNode* x = pe->next; + SHashNode *x = pe->next; if (x != NULL) { ASSERT(x->next != x); } diff --git a/source/util/src/tlist.c b/source/util/src/tlist.c index 1d17b4a9e17aa7cafdd89ba273770e8751f09066..b1c018805157fe05ef6be97fa7be6df0255d5d5b 100644 --- a/source/util/src/tlist.c +++ b/source/util/src/tlist.c @@ -95,7 +95,7 @@ SListNode *tdListPopTail(SList *list) { SListNode *tdListGetHead(SList *list) { return TD_DLIST_HEAD(list); } -SListNode *tsListGetTail(SList *list) { return TD_DLIST_TAIL(list); } +SListNode *tdListGetTail(SList *list) { return TD_DLIST_TAIL(list); } SListNode *tdListPopNode(SList *list, SListNode *node) { TD_DLIST_POP(list, node); diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index c1fc2c48c04b1fe42ea886516772ab63eac91556..353e94a49096822fe581d7faa0df8a29a6494c12 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -39,7 +39,7 @@ #define LOG_BUF_MUTEX(x) ((x)->buffMutex) typedef struct { - char *buffer; + char * buffer; int32_t buffStart; int32_t buffEnd; int32_t buffSize; @@ -58,7 +58,7 @@ typedef struct { int32_t openInProgress; pid_t pid; char logName[LOG_FILE_NAME_LEN]; - SLogBuff *logHandle; + SLogBuff * logHandle; TdThreadMutex logMutex; } SLogObj; @@ -96,6 +96,7 @@ int32_t fsDebugFlag = 135; int32_t metaDebugFlag = 135; int32_t fnDebugFlag = 135; int32_t smaDebugFlag = 135; +int32_t idxDebugFlag = 135; int64_t dbgEmptyW = 0; int64_t dbgWN = 0; @@ -103,7 +104,7 @@ int64_t dbgSmallWN = 0; int64_t dbgBigWN = 0; int64_t dbgWSize = 0; -static void *taosAsyncOutputLog(void *param); +static void * taosAsyncOutputLog(void *param); static int32_t taosPushLogBuffer(SLogBuff *pLogBuf, const char *msg, int32_t msgLen); static SLogBuff *taosLogBuffNew(int32_t bufSize); static void taosCloseLogByFd(TdFilePtr pFile); @@ -226,7 +227,7 @@ static void *taosThreadToOpenNewFile(void *param) { tsLogObj.logHandle->pFile = pFile; tsLogObj.lines = 
0; tsLogObj.openInProgress = 0; - taosSsleep(10); + taosSsleep(20); taosCloseLogByFd(pOldFile); uInfo(" new log file:%d is opened", tsLogObj.flag); @@ -490,7 +491,7 @@ void taosDumpData(unsigned char *msg, int32_t len) { if (!osLogSpaceAvailable()) return; taosUpdateLogNums(DEBUG_DUMP); - char temp[256]; + char temp[256] = {0}; int32_t i, pos = 0, c = 0; for (i = 0; i < len; ++i) { @@ -701,7 +702,7 @@ int32_t taosCompressFile(char *srcFileName, char *destFileName) { int32_t compressSize = 163840; int32_t ret = 0; int32_t len = 0; - char *data = taosMemoryMalloc(compressSize); + char * data = taosMemoryMalloc(compressSize); // gzFile dstFp = NULL; // srcFp = fopen(srcFileName, "r"); @@ -759,6 +760,7 @@ void taosSetAllDebugFlag(int32_t flag) { fsDebugFlag = flag; fnDebugFlag = flag; smaDebugFlag = flag; + idxDebugFlag = flag; uInfo("all debug flag are set to %d", flag); } diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c index 00f123370747fcc29eddbb9ad053514134d3bc8f..101ac78e1847a1db244f7dfe867f94aeec0447d4 100644 --- a/source/util/src/tpagedbuf.c +++ b/source/util/src/tpagedbuf.c @@ -549,11 +549,16 @@ void destroyDiskbasedBuf(SDiskbasedBuf* pBuf) { // print the statistics information { SDiskbasedBufStatis* ps = &pBuf->statis; - uDebug( - "Get/Release pages:%d/%d, flushToDisk:%.2f Kb (%d Pages), loadFromDisk:%.2f Kb (%d Pages), avgPageSize:%.2f " - "Kb\n", - ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f, - ps->loadPages, ps->loadBytes / (1024.0 * ps->loadPages)); + if (ps->loadPages == 0) { + uDebug( + "Get/Release pages:%d/%d, flushToDisk:%.2f Kb (%d Pages), loadFromDisk:%.2f Kb (%d Pages)", + ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f, ps->loadPages); + } else { + uDebug( + "Get/Release pages:%d/%d, flushToDisk:%.2f Kb (%d Pages), loadFromDisk:%.2f Kb (%d Pages), avgPageSize:%.2f Kb", + ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f, + ps->loadPages, ps->loadBytes / (1024.0 * ps->loadPages)); + } } taosRemoveFile(pBuf->path); diff --git a/source/util/src/tqueue.c b/source/util/src/tqueue.c index 6a10794ea154306f3c26b9666482a7c3a5b61958..37935087fad693eed254549977182ccaca1085f2 100644 --- a/source/util/src/tqueue.c +++ b/source/util/src/tqueue.c @@ -26,6 +26,7 @@ typedef struct STaosQnode STaosQnode; typedef struct STaosQnode { STaosQnode *next; STaosQueue *queue; + int64_t timestamp; int32_t size; int8_t itype; int8_t reserved[3]; @@ -144,6 +145,7 @@ void *taosAllocateQitem(int32_t size, EQItype itype) { STaosQnode *pNode = taosMemoryCalloc(1, sizeof(STaosQnode) + size); pNode->size = size; pNode->itype = itype; + pNode->timestamp = taosGetTimestampUs(); if (pNode == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -393,7 +395,7 @@ void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue) { int32_t taosGetQueueNumber(STaosQset *qset) { return qset->numOfQueues; } -int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FItem *itemFp) { +int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void **ahandle, FItem *itemFp) { STaosQnode *pNode = NULL; int32_t code = 0; @@ -415,6 +417,7 @@ int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FI *ppItem = pNode->item; if (ahandle) *ahandle = queue->ahandle; if (itemFp) *itemFp = queue->itemFp; + if (ts) *ts = pNode->timestamp; queue->head = pNode->next; if (queue->head == NULL) queue->tail = NULL; diff 
--git a/source/util/src/tstrbuild.c b/source/util/src/tstrbuild.c index 2aae588046402e37569f5a2bde5ed5f72fa24346..c87b889e82ece82c251ddabad1964bc1f0b3ab2f 100644 --- a/source/util/src/tstrbuild.c +++ b/source/util/src/tstrbuild.c @@ -69,13 +69,13 @@ void taosStringBuilderAppendString(SStringBuilder* sb, const char* str) { void taosStringBuilderAppendNull(SStringBuilder* sb) { taosStringBuilderAppendStringLen(sb, "null", 4); } void taosStringBuilderAppendInteger(SStringBuilder* sb, int64_t v) { - char buf[64]; + char buf[64] = {0}; size_t len = snprintf(buf, sizeof(buf), "%" PRId64, v); taosStringBuilderAppendStringLen(sb, buf, TMIN(len, sizeof(buf))); } void taosStringBuilderAppendDouble(SStringBuilder* sb, double v) { - char buf[512]; + char buf[512] = {0}; size_t len = snprintf(buf, sizeof(buf), "%.9lf", v); taosStringBuilderAppendStringLen(sb, buf, TMIN(len, sizeof(buf))); } diff --git a/source/util/src/tutil.c b/source/util/src/tutil.c index adb6a37ba737fbf1ccafe35231853f8a2fc4cfc8..0534eb3462f92a6ae4252efed389b0d309412c01 100644 --- a/source/util/src/tutil.c +++ b/source/util/src/tutil.c @@ -52,7 +52,7 @@ size_t strtrim(char *z) { int32_t j = 0; int32_t delta = 0; - while (z[j] == ' ') { + while (isspace(z[j])) { ++j; } @@ -65,9 +65,9 @@ size_t strtrim(char *z) { int32_t stop = 0; while (z[j] != 0) { - if (z[j] == ' ' && stop == 0) { + if (isspace(z[j]) && stop == 0) { stop = j; - } else if (z[j] != ' ' && stop != 0) { + } else if (!isspace(z[j]) && stop != 0) { stop = 0; } diff --git a/source/util/src/tworker.c b/source/util/src/tworker.c index dc48fc3f8d2b2e803e8f1593d5471184fa99e059..686e0696ec689b48ecff8f27c7db2eb86daa5eb2 100644 --- a/source/util/src/tworker.c +++ b/source/util/src/tworker.c @@ -75,19 +75,20 @@ static void *tQWorkerThreadFp(SQWorker *worker) { void *msg = NULL; void *ahandle = NULL; int32_t code = 0; + int64_t ts = 0; taosBlockSIGPIPE(); setThreadName(pool->name); uDebug("worker:%s:%d is running", pool->name, worker->id); while (1) { - if (taosReadQitemFromQset(pool->qset, (void **)&msg, &ahandle, &fp) == 0) { + if (taosReadQitemFromQset(pool->qset, (void **)&msg, &ts, &ahandle, &fp) == 0) { uDebug("worker:%s:%d qset:%p, got no message and exiting", pool->name, worker->id, pool->qset); break; } if (fp != NULL) { - SQueueInfo info = {.ahandle = ahandle, .workerId = worker->id, .threadNum = pool->num}; + SQueueInfo info = {.ahandle = ahandle, .workerId = worker->id, .threadNum = pool->num, .timestamp = ts}; (*fp)(&info, msg); } } diff --git a/source/util/test/cfgTest.cpp b/source/util/test/cfgTest.cpp index cd13ebe8ae3a07db3cd75ec39df3f5795566d1dc..c5d9e830d2ece7229af3d18bd8f4e5613c63854e 100644 --- a/source/util/test/cfgTest.cpp +++ b/source/util/test/cfgTest.cpp @@ -59,7 +59,7 @@ TEST_F(CfgTest, 02_Basic) { EXPECT_EQ(cfgAddInt64(pConfig, "test_int64", 2, 0, 16, 0), 0); EXPECT_EQ(cfgAddFloat(pConfig, "test_float", 3, 0, 16, 0), 0); EXPECT_EQ(cfgAddString(pConfig, "test_string", "4", 0), 0); - EXPECT_EQ(cfgAddDir(pConfig, "test_dir", "/tmp", 0), 0); + EXPECT_EQ(cfgAddDir(pConfig, "test_dir", TD_TMP_DIR_PATH, 0), 0); EXPECT_EQ(cfgGetSize(pConfig), 6); @@ -126,7 +126,7 @@ TEST_F(CfgTest, 02_Basic) { EXPECT_EQ(pItem->stype, CFG_STYPE_DEFAULT); EXPECT_EQ(pItem->dtype, CFG_DTYPE_DIR); EXPECT_STREQ(pItem->name, "test_dir"); - EXPECT_STREQ(pItem->str, "/tmp"); + EXPECT_STREQ(pItem->str, TD_TMP_DIR_PATH); cfgCleanup(pConfig); } diff --git a/source/util/test/pageBufferTest.cpp b/source/util/test/pageBufferTest.cpp index 
5ad3cb42aa1bfd9061a594c3ba412508b6219767..eaf198a483aa5e3e90595d2417516aa53f754331 100644 --- a/source/util/test/pageBufferTest.cpp +++ b/source/util/test/pageBufferTest.cpp @@ -13,7 +13,7 @@ namespace { // simple test void simpleTest() { SDiskbasedBuf* pBuf = NULL; - int32_t ret = createDiskbasedBuf(&pBuf, 1024, 4096, "", "/tmp/"); + int32_t ret = createDiskbasedBuf(&pBuf, 1024, 4096, "", TD_TMP_DIR_PATH); int32_t pageId = 0; int32_t groupId = 0; @@ -57,7 +57,7 @@ void simpleTest() { void writeDownTest() { SDiskbasedBuf* pBuf = NULL; - int32_t ret = createDiskbasedBuf(&pBuf, 1024, 4*1024, "1", "/tmp/"); + int32_t ret = createDiskbasedBuf(&pBuf, 1024, 4*1024, "1", TD_TMP_DIR_PATH); int32_t pageId = 0; int32_t writePageId = 0; @@ -106,7 +106,7 @@ void writeDownTest() { void recyclePageTest() { SDiskbasedBuf* pBuf = NULL; - int32_t ret = createDiskbasedBuf(&pBuf, 1024, 4*1024, "1", "/tmp/"); + int32_t ret = createDiskbasedBuf(&pBuf, 1024, 4*1024, "1", TD_TMP_DIR_PATH); int32_t pageId = 0; int32_t writePageId = 0; diff --git a/source/util/test/procTest.cpp b/source/util/test/procTest.cpp index af53ddcea5e705b9e8ae50908ac777bdcce6da47..53d3fa2c4bc118d57d9a8d08ecc4810f83aa5eda 100644 --- a/source/util/test/procTest.cpp +++ b/source/util/test/procTest.cpp @@ -38,9 +38,9 @@ class UtilTesProc : public ::testing::Test { head.noResp = 3; head.persistHandle = 4; - taosRemoveDir("/tmp/td"); - taosMkDir("/tmp/td"); - tstrncpy(tsLogDir, "/tmp/td", PATH_MAX); + taosRemoveDir(TD_TMP_DIR_PATH "td"); + taosMkDir(TD_TMP_DIR_PATH "td"); + tstrncpy(tsLogDir, TD_TMP_DIR_PATH "td", PATH_MAX); if (taosInitLog("taosdlog", 1) != 0) { printf("failed to init log file\n"); } diff --git a/tests/pytest/cluster/clusterSetup.py b/tests/pytest/cluster/clusterSetup.py index 87414303f850bcbd78468238e7b76aa3dbb3326e..809e0e9d25ed79246cbd4d83d39f262b0a678cd0 100644 --- a/tests/pytest/cluster/clusterSetup.py +++ b/tests/pytest/cluster/clusterSetup.py @@ -92,13 +92,13 @@ class Node: self.conn.run("yes|./install.sh") def configTaosd(self, taosConfigKey, taosConfigValue): - self.conn.run("sudo echo '%s %s' >> %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg")) + self.conn.run("sudo echo %s %s >> %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg")) def removeTaosConfig(self, taosConfigKey, taosConfigValue): self.conn.run("sudo sed -in-place -e '/%s %s/d' %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg")) def configHosts(self, ip, name): - self.conn.run("echo '%s %s' >> %s" % (ip, name, '/etc/hosts')) + self.conn.run("echo %s %s >> %s" % (ip, name, '/etc/hosts')) def removeData(self): try: diff --git a/tests/pytest/cq.py b/tests/pytest/cq.py deleted file mode 100644 index 7778969619f2d0679c2596581d8d76101d41ed9f..0000000000000000000000000000000000000000 --- a/tests/pytest/cq.py +++ /dev/null @@ -1,169 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. 
-# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- -import threading -import taos -import sys -import json -import time -import random -# query sql -query_sql = [ -# first supertable -"select count(*) from test.meters ;", -"select count(*) from test.meters where t3 > 2;", -"select count(*) from test.meters where ts <> '2020-05-13 10:00:00.002';", -"select count(*) from test.meters where t7 like 'taos_1%';", -"select count(*) from test.meters where t7 like '_____2';", -"select count(*) from test.meters where t8 like '%思%';", -"select count(*) from test.meters interval(1n) order by ts desc;", -#"select max(c0) from test.meters group by tbname", -"select first(ts) from test.meters where t5 >5000 and t5<5100;", -"select last(ts) from test.meters where t5 >5000 and t5<5100;", -"select last_row(*) from test.meters;", -"select twa(c1) from test.t1 where ts > 1500000001000 and ts < 1500000101000" , -"select avg(c1) from test.meters where t5 >5000 and t5<5100;", -"select bottom(c1, 2) from test.t1;", -"select diff(c1) from test.t1;", -"select leastsquares(c1, 1, 1) from test.t1 ;", -"select max(c1) from test.meters where t5 >5000 and t5<5100;", -"select min(c1) from test.meters where t5 >5000 and t5<5100;", -"select c1 + c2 + c1 / c5 + c4 + c2 from test.t1;", -"select percentile(c1, 50) from test.t1;", -"select spread(c1) from test.t1 ;", -"select stddev(c1) from test.t1;", -"select sum(c1) from test.meters where t5 >5000 and t5<5100;", -"select top(c1, 2) from test.meters where t5 >5000 and t5<5100;" -"select twa(c4) from test.t1 where ts > 1500000001000 and ts < 1500000101000" , -"select avg(c4) from test.meters where t5 >5000 and t5<5100;", -"select bottom(c4, 2) from test.t1 where t5 >5000 and t5<5100;", -"select diff(c4) from test.t1 where t5 >5000 and t5<5100;", -"select leastsquares(c4, 1, 1) from test.t1 ;", -"select max(c4) from test.meters where t5 >5000 and t5<5100;", -"select min(c4) from test.meters where t5 >5000 and t5<5100;", -"select c5 + c2 + c4 / c5 + c4 + c2 from test.t1 ;", -"select percentile(c5, 50) from test.t1;", -"select spread(c5) from test.t1 ;", -"select stddev(c5) from test.t1 where t5 >5000 and t5<5100;", -"select sum(c5) from test.meters where t5 >5000 and t5<5100;", -"select top(c5, 2) from test.meters where t5 >5000 and t5<5100;", -#all vnode -"select count(*) from test.meters where t5 >5000 and t5<5100", -"select max(c0),avg(c1) from test.meters where t5 >5000 and t5<5100", -"select sum(c5),avg(c1) from test.meters where t5 >5000 and t5<5100", -"select max(c0),min(c5) from test.meters where t5 >5000 and t5<5100", -"select min(c0),avg(c5) from test.meters where t5 >5000 and t5<5100", -# second supertable -"select count(*) from test.meters1 where t3 > 2;", -"select count(*) from test.meters1 where ts <> '2020-05-13 10:00:00.002';", -"select count(*) from test.meters where t7 like 'taos_1%';", -"select count(*) from test.meters where t7 like '_____2';", -"select count(*) from test.meters where t8 like '%思%';", -"select count(*) from test.meters1 interval(1n) order by ts desc;", -#"select max(c0) from test.meters1 group by tbname", -"select first(ts) from test.meters1 where t5 >5000 and t5<5100;", -"select last(ts) from test.meters1 where t5 >5000 and t5<5100;", -"select last_row(*) from test.meters1 ;", -"select twa(c1) from 
test.m1 where ts > 1500000001000 and ts < 1500000101000" , -"select avg(c1) from test.meters1 where t5 >5000 and t5<5100;", -"select bottom(c1, 2) from test.m1 where t5 >5000 and t5<5100;", -"select diff(c1) from test.m1 ;", -"select leastsquares(c1, 1, 1) from test.m1 ;", -"select max(c1) from test.meters1 where t5 >5000 and t5<5100;", -"select min(c1) from test.meters1 where t5 >5000 and t5<5100;", -"select c1 + c2 + c1 / c0 + c2 from test.m1 ;", -"select percentile(c1, 50) from test.m1;", -"select spread(c1) from test.m1 ;", -"select stddev(c1) from test.m1;", -"select sum(c1) from test.meters1 where t5 >5000 and t5<5100;", -"select top(c1, 2) from test.meters1 where t5 >5000 and t5<5100;", -"select twa(c5) from test.m1 where ts > 1500000001000 and ts < 1500000101000" , -"select avg(c5) from test.meters1 where t5 >5000 and t5<5100;", -"select bottom(c5, 2) from test.m1;", -"select diff(c5) from test.m1;", -"select leastsquares(c5, 1, 1) from test.m1 ;", -"select max(c5) from test.meters1 where t5 >5000 and t5<5100;", -"select min(c5) from test.meters1 where t5 >5000 and t5<5100;", -"select c5 + c2 + c4 / c5 + c0 from test.m1;", -"select percentile(c4, 50) from test.m1;", -"select spread(c4) from test.m1 ;", -"select stddev(c4) from test.m1;", -"select sum(c4) from test.meters1 where t5 >5100 and t5<5300;", -"select top(c4, 2) from test.meters1 where t5 >5100 and t5<5300;", -"select count(*) from test.meters1 where t5 >5100 and t5<5300", -#all vnode -"select count(*) from test.meters1 where t5 >5100 and t5<5300", -"select max(c0),avg(c1) from test.meters1 where t5 >5000 and t5<5100", -"select sum(c5),avg(c1) from test.meters1 where t5 >5000 and t5<5100", -"select max(c0),min(c5) from test.meters1 where t5 >5000 and t5<5100", -"select min(c0),avg(c5) from test.meters1 where t5 >5000 and t5<5100", -#join -# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t5 = meters1.t5", -# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t7 = meters1.t7", -# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8", -# "select meters.ts,meters1.c2 from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8" -] - -class ConcurrentInquiry: - def initConnection(self): - self.numOfTherads = 50 - self.ts=1500000001000 - - def SetThreadsNum(self,num): - self.numOfTherads=num - def query_thread(self,threadID): - host = "10.211.55.14" - user = "root" - password = "taosdata" - conn = taos.connect( - host, - user, - password, - ) - cl = conn.cursor() - cl.execute("use test;") - - print("Thread %d: starting" % threadID) - - while True: - ran_query_sql=query_sql - random.shuffle(ran_query_sql) - for i in ran_query_sql: - print("Thread %d : %s"% (threadID,i)) - try: - start = time.time() - cl.execute(i) - cl.fetchall() - end = time.time() - print("time cost :",end-start) - except Exception as e: - print( - "Failure thread%d, sql: %s,exception: %s" % - (threadID, str(i),str(e))) - exit(-1) - - - print("Thread %d: finishing" % threadID) - - - - def run(self): - - threads = [] - for i in range(self.numOfTherads): - thread = threading.Thread(target=self.query_thread, args=(i,)) - threads.append(thread) - thread.start() - -q = ConcurrentInquiry() -q.initConnection() -q.run() diff --git a/tests/pytest/dockerCluster/basic.py b/tests/pytest/dockerCluster/basic.py index 871d69790d328f3dcea9fdfdac27a6abc3bb14bd..5188aa4a80a8faacfbc4056958bde2363a796449 100644 --- a/tests/pytest/dockerCluster/basic.py +++ 
b/tests/pytest/dockerCluster/basic.py @@ -113,7 +113,7 @@ class BuildDockerCluser: def cfg(self, option, value, nodeIndex): cfgPath = "%s/node%d/cfg/taos.cfg" % (self.dockerDir, nodeIndex) - cmd = "echo '%s %s' >> %s" % (option, value, cfgPath) + cmd = "echo %s %s >> %s" % (option, value, cfgPath) self.execCmd(cmd) def updateLocalhosts(self): @@ -122,7 +122,7 @@ class BuildDockerCluser: print(result) if result is None or result.isspace(): print("==========") - cmd = "echo '172.27.0.7 tdnode1' >> /etc/hosts" + cmd = "echo 172.27.0.7 tdnode1 >> /etc/hosts" display = "echo %s" % cmd self.execCmd(display) self.execCmd(cmd) diff --git a/tests/pytest/fulltest.bat b/tests/pytest/fulltest.bat new file mode 100644 index 0000000000000000000000000000000000000000..fd74f2ad029c982a3a3dd98ae0c8df264bab9c66 --- /dev/null +++ b/tests/pytest/fulltest.bat @@ -0,0 +1,22 @@ + +python .\test.py -f insert\basic.py +python .\test.py -f insert\int.py +python .\test.py -f insert\float.py +python .\test.py -f insert\bigint.py +python .\test.py -f insert\bool.py +python .\test.py -f insert\double.py +python .\test.py -f insert\smallint.py +python .\test.py -f insert\tinyint.py +python .\test.py -f insert\date.py +python .\test.py -f insert\binary.py +python .\test.py -f insert\nchar.py + +python .\test.py -f query\filter.py +python .\test.py -f query\filterCombo.py +python .\test.py -f query\queryNormal.py +python .\test.py -f query\queryError.py +python .\test.py -f query\filterAllIntTypes.py +python .\test.py -f query\filterFloatAndDouble.py +python .\test.py -f query\filterOtherTypes.py +python .\test.py -f query\querySort.py +python .\test.py -f query\queryJoin.py \ No newline at end of file diff --git a/tests/pytest/manualTest/TD-5114/rollingUpgrade.py b/tests/pytest/manualTest/TD-5114/rollingUpgrade.py index f634eb1208b69f263ea89db2440db40ec3e085e6..b2d5171972b9e5e0025c4e46e8dd1f257ed48e24 100644 --- a/tests/pytest/manualTest/TD-5114/rollingUpgrade.py +++ b/tests/pytest/manualTest/TD-5114/rollingUpgrade.py @@ -38,7 +38,7 @@ class Node: def buildTaosd(self): try: print(self.conn) - # self.conn.run('echo "1234" > /home/chr/installtest/test.log') + # self.conn.run('echo 1234 > /home/chr/installtest/test.log') self.conn.run("cd /home/chr/installtest/ && tar -xvf %s " %self.verName) self.conn.run("cd /home/chr/installtest/%s && ./install.sh " % self.installPath) except Exception as e: @@ -49,7 +49,7 @@ class Node: def rebuildTaosd(self): try: print(self.conn) - # self.conn.run('echo "1234" > /home/chr/installtest/test.log') + # self.conn.run('echo 1234 > /home/chr/installtest/test.log') self.conn.run("cd /home/chr/installtest/%s && ./install.sh " % self.installPath) except Exception as e: print("Build Taosd error for node %d " % self.index) @@ -108,7 +108,7 @@ class oneNode: # install TDengine at 192.168.103/104/141 try: node = Node(id, username, IP, passwd, version) - node.conn.run('echo "start taosd"') + node.conn.run('echo start taosd') node.buildTaosd() # clear DataPath , if need clear data node.clearData() @@ -128,7 +128,7 @@ class oneNode: # start TDengine try: node = Node(id, username, IP, passwd, version) - node.conn.run('echo "restart taosd"') + node.conn.run('echo restart taosd') # clear DataPath , if need clear data node.clearData() node.restartTaosd() @@ -149,14 +149,14 @@ class oneNode: verName = "TDengine-enterprise-server-%s-Linux-x64.tar.gz" % version # installPath = "TDengine-enterprise-server-%s" % self.version node131 = Node(131, 'ubuntu', '192.168.1.131', 'tbase125!', '2.0.20.0') - 
node131.conn.run('echo "upgrade cluster"') + node131.conn.run('echo upgrade cluster') node131.conn.run('sshpass -p tbase125! scp /nas/TDengine/v%s/enterprise/%s root@192.168.1.%d:/home/chr/installtest/' % (version,verName,id)) node131.conn.close() # upgrade TDengine at 192.168.103/104/141 try: node = Node(id, username, IP, passwd, version) - node.conn.run('echo "start taosd"') - node.conn.run('echo "1234" > /home/chr/test.log') + node.conn.run('echo start taosd') + node.conn.run('echo 1234 > /home/chr/test.log') node.buildTaosd() time.sleep(5) node.startTaosd() @@ -176,7 +176,7 @@ class oneNode: # backCluster TDengine at 192.168.103/104/141 try: node = Node(id, username, IP, passwd, version) - node.conn.run('echo "rollback taos"') + node.conn.run('echo rollback taos') node.rebuildTaosd() time.sleep(5) node.startTaosd() diff --git a/tests/pytest/stream/cqSupportBefore1970.py b/tests/pytest/stream/cqSupportBefore1970.py deleted file mode 100644 index 01ba5234fcabb96a4c3c7c28e405c316d6e7dc7d..0000000000000000000000000000000000000000 --- a/tests/pytest/stream/cqSupportBefore1970.py +++ /dev/null @@ -1,93 +0,0 @@ -# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -from util.log import * -from util.cases import * -from util.sql import * -from util.dnodes import * - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug(f"start to execute {__file__}") - tdSql.init(conn.cursor(), logSql) - - def insertnow(self): - - # timestamp list: - # 0 -> "1970-01-01 08:00:00" | -28800000 -> "1970-01-01 00:00:00" | -946800000000 -> "1940-01-01 00:00:00" - # -631180800000 -> "1950-01-01 00:00:00" - - tsp1 = 0 - tsp2 = -28800000 - tsp3 = -946800000000 - tsp4 = "1969-01-01 00:00:00.000" - - tdSql.execute("insert into tcq1 values (now-11d, 5)") - tdSql.execute(f"insert into tcq1 values ({tsp1}, 4)") - tdSql.execute(f"insert into tcq1 values ({tsp2}, 3)") - tdSql.execute(f"insert into tcq1 values ('{tsp4}', 2)") - tdSql.execute(f"insert into tcq1 values ({tsp3}, 1)") - - def waitedQuery(self, sql, expectRows, timeout): - tdLog.info(f"sql: {sql}, try to retrieve {expectRows} rows in {timeout} seconds") - try: - for i in range(timeout): - tdSql.cursor.execute(sql) - self.queryResult = tdSql.cursor.fetchall() - self.queryRows = len(self.queryResult) - self.queryCols = len(tdSql.cursor.description) - # tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows)) - if self.queryRows >= expectRows: - return (self.queryRows, i) - time.sleep(1) - except Exception as e: - caller = inspect.getframeinfo(inspect.stack()[1][0]) - tdLog.notice(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, {repr(e)}") - raise Exception(repr(e)) - return (self.queryRows, timeout) - - def cq(self): - tdSql.execute( - "create table cq1 as select avg(c1) from tcq1 where ts > -946800000000 interval(10d) sliding(1d)" - ) - self.waitedQuery("select * from cq1", 1, 120) - - def querycq(self): - tdSql.query("select * from cq1") - tdSql.checkData(0, 1, 1.0) - tdSql.checkData(10, 1, 2.0) - - def run(self): - tdSql.execute("drop database if exists dbcq") - tdSql.execute("create database if not exists dbcq keep 36500") - tdSql.execute("use dbcq") - - tdSql.execute("create table stbcq (ts timestamp, c1 int ) TAGS(t1 int)") - tdSql.execute("create table 
tcq1 using stbcq tags(1)") - - self.insertnow() - self.cq() - self.querycq() - - # after wal and sync, check again - tdSql.query("show dnodes") - index = tdSql.getData(0, 0) - tdDnodes.stop(index) - tdDnodes.start(index) - - self.querycq() - - def stop(self): - tdSql.close() - tdLog.success(f"{__file__} successfully executed") - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/stream/history.py b/tests/pytest/stream/history.py deleted file mode 100644 index cb8a4d598651473f907aa05a0609c9ce68c78f82..0000000000000000000000000000000000000000 --- a/tests/pytest/stream/history.py +++ /dev/null @@ -1,67 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. -# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import time -import taos -from util.log import tdLog -from util.cases import tdCases -from util.sql import tdSql - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - def run(self): - tdSql.prepare() - - tdSql.execute("create table cars(ts timestamp, s int) tags(id int)") - tdSql.execute("create table car0 using cars tags(0)") - tdSql.execute("create table car1 using cars tags(1)") - tdSql.execute("create table car2 using cars tags(2)") - tdSql.execute("create table car3 using cars tags(3)") - tdSql.execute("create table car4 using cars tags(4)") - - tdSql.execute("insert into car0 values('2019-01-01 00:00:00.103', 1)") - tdSql.execute("insert into car1 values('2019-01-01 00:00:00.234', 1)") - tdSql.execute("insert into car0 values('2019-01-01 00:00:01.012', 1)") - tdSql.execute("insert into car0 values('2019-01-01 00:00:02.003', 1)") - tdSql.execute("insert into car2 values('2019-01-01 00:00:02.328', 1)") - tdSql.execute("insert into car0 values('2019-01-01 00:00:03.139', 1)") - tdSql.execute("insert into car0 values('2019-01-01 00:00:04.348', 1)") - tdSql.execute("insert into car0 values('2019-01-01 00:00:05.783', 1)") - tdSql.execute("insert into car1 values('2019-01-01 00:00:01.893', 1)") - tdSql.execute("insert into car1 values('2019-01-01 00:00:02.712', 1)") - tdSql.execute("insert into car1 values('2019-01-01 00:00:03.982', 1)") - tdSql.execute("insert into car3 values('2019-01-01 00:00:01.389', 1)") - tdSql.execute("insert into car4 values('2019-01-01 00:00:01.829', 1)") - - tdSql.error("create table strm as select count(*) from cars") - - tdSql.execute("create table strm as select count(*) from cars interval(4s)") - tdSql.waitedQuery("select * from strm", 2, 100) - tdSql.checkData(0, 1, 11) - tdSql.checkData(1, 1, 2) - - - - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/stream/metric_1.py b/tests/pytest/stream/metric_1.py deleted file mode 100644 index b4cccac69c8afe9c637b7a455732572c029258a7..0000000000000000000000000000000000000000 --- a/tests/pytest/stream/metric_1.py +++ /dev/null @@ -1,104 +0,0 @@ 
-################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. -# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import time -import taos -from util.log import tdLog -from util.cases import tdCases -from util.sql import tdSql - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - def createFuncStream(self, expr, suffix, value): - tbname = "strm_" + suffix - tdLog.info("create stream table %s" % tbname) - tdSql.query("select %s from stb interval(1d)" % expr) - tdSql.checkData(0, 1, value) - tdSql.execute("create table %s as select %s from stb interval(1d)" % (tbname, expr)) - - def checkStreamData(self, suffix, value): - sql = "select * from strm_" + suffix - tdSql.waitedQuery(sql, 1, 120) - tdSql.checkData(0, 1, value) - - def run(self): - tbNum = 10 - rowNum = 20 - - tdSql.prepare() - - tdLog.info("===== preparing data =====") - tdSql.execute( - "create table stb(ts timestamp, tbcol int, tbcol2 float) tags(tgcol int)") - for i in range(tbNum): - tdSql.execute("create table tb%d using stb tags(%d)" % (i, i)) - for j in range(rowNum): - tdSql.execute( - "insert into tb%d values (now - %dm, %d, %d)" % - (i, 1440 - j, j, j)) - time.sleep(0.1) - - self.createFuncStream("count(*)", "c1", 200) - self.createFuncStream("count(tbcol)", "c2", 200) - self.createFuncStream("count(tbcol2)", "c3", 200) - self.createFuncStream("avg(tbcol)", "av", 9.5) - self.createFuncStream("sum(tbcol)", "su", 1900) - self.createFuncStream("min(tbcol)", "mi", 0) - self.createFuncStream("max(tbcol)", "ma", 19) - self.createFuncStream("first(tbcol)", "fi", 0) - self.createFuncStream("last(tbcol)", "la", 19) - #tdSql.query("select stddev(tbcol) from stb interval(1d)") - #tdSql.query("select leastsquares(tbcol, 1, 1) from stb interval(1d)") - tdSql.query("select top(tbcol, 1) from stb interval(1d)") - tdSql.query("select bottom(tbcol, 1) from stb interval(1d)") - #tdSql.query("select percentile(tbcol, 1) from stb interval(1d)") - #tdSql.query("select diff(tbcol) from stb interval(1d)") - - tdSql.query("select count(tbcol) from stb where ts < now + 4m interval(1d)") - tdSql.checkData(0, 1, 200) - #tdSql.execute("create table strm_wh as select count(tbcol) from stb where ts < now + 4m interval(1d)") - - self.createFuncStream("count(tbcol)", "as", 200) - - tdSql.query("select count(tbcol) from stb interval(1d) group by tgcol") - tdSql.checkData(0, 1, 20) - - tdSql.query("select count(tbcol) from stb where ts < now + 4m interval(1d) group by tgcol") - tdSql.checkData(0, 1, 20) - - self.checkStreamData("c1", 200) - self.checkStreamData("c2", 200) - self.checkStreamData("c3", 200) - self.checkStreamData("av", 9.5) - self.checkStreamData("su", 1900) - self.checkStreamData("mi", 0) - self.checkStreamData("ma", 19) - self.checkStreamData("fi", 0) - self.checkStreamData("la", 19) - #self.checkStreamData("wh", 200) - self.checkStreamData("as", 200) - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) - - diff --git 
a/tests/pytest/stream/metric_n.py b/tests/pytest/stream/metric_n.py deleted file mode 100644 index d223fe81fc79835047bac8ca2341cdbeac2e6617..0000000000000000000000000000000000000000 --- a/tests/pytest/stream/metric_n.py +++ /dev/null @@ -1,123 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. -# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import time -import taos -from util.log import tdLog -from util.cases import tdCases -from util.sql import tdSql - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - def run(self): - tbNum = 10 - rowNum = 20 - totalNum = tbNum * rowNum - - tdSql.prepare() - - tdLog.info("===== preparing data =====") - tdSql.execute( - "create table stb(ts timestamp, tbcol int, tbcol2 float) tags(tgcol int)") - for i in range(tbNum): - tdSql.execute("create table tb%d using stb tags(%d)" % (i, i)) - for j in range(rowNum): - tdSql.execute( - "insert into tb%d values (now - %dm, %d, %d)" % - (i, 1440 - j, j, j)) - time.sleep(0.1) - - tdLog.info("===== step 1 =====") - tdSql.query("select count(*), count(tbcol), count(tbcol2) from stb interval(1d)") - tdSql.checkData(0, 1, totalNum) - tdSql.checkData(0, 2, totalNum) - tdSql.checkData(0, 3, totalNum) - - tdLog.info("===== step 2 =====") - tdSql.execute("create table strm_c3 as select count(*), count(tbcol), count(tbcol2) from stb interval(1d)") - - tdLog.info("===== step 3 =====") - tdSql.execute("create table strm_c32 as select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from stb interval(1d)") - - tdLog.info("===== step 4 =====") - tdSql.query("select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from stb interval(1d)") - tdSql.checkData(0, 1, totalNum) - tdSql.checkData(0, 2, totalNum) - tdSql.checkData(0, 3, totalNum) - - tdLog.info("===== step 5 =====") - tdSql.execute("create table strm_c31 as select count(*), count(tbcol) as c1, count(tbcol2) as c2, 
count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from stb interval(1d)") - - tdLog.info("===== step 6 =====") - tdSql.query("select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from stb interval(1d)") - tdSql.checkData(0, 1, 9.5) - tdSql.checkData(0, 2, 1900) - tdSql.checkData(0, 3, 0) - tdSql.checkData(0, 4, 19) - tdSql.checkData(0, 5, 0) - tdSql.checkData(0, 6, 19) - tdSql.execute("create table strm_avg as select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from stb interval(1d)") - - tdLog.info("===== step 7 =====") - tdSql.query("select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), count(tbcol) from stb where ts < now + 4m interval(1d)") - tdSql.checkData(0, 1, 9.5) - tdSql.checkData(0, 2, 1900) - tdSql.checkData(0, 3, 0) - tdSql.checkData(0, 4, 19) - tdSql.checkData(0, 5, 0) - tdSql.checkData(0, 6, 19) - tdSql.checkData(0, 7, totalNum) - - tdLog.info("===== step 8 =====") - tdSql.query("select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), count(tbcol) from stb where ts < now + 4m interval(1d)") - tdSql.checkData(0, 1, 9.5) - tdSql.checkData(0, 2, 1900) - tdSql.checkData(0, 3, 0) - tdSql.checkData(0, 4, 19) - tdSql.checkData(0, 5, 0) - tdSql.checkData(0, 6, 19) - tdSql.checkData(0, 7, totalNum) - - tdLog.info("===== step 9 =====") - tdSql.waitedQuery("select * from strm_c3", 1, 120) - tdSql.checkData(0, 1, totalNum) - tdSql.checkData(0, 2, totalNum) - tdSql.checkData(0, 3, totalNum) - - tdLog.info("===== step 10 =====") - tdSql.waitedQuery("select * from strm_c31", 1, 30) - for i in range(1, 10): - tdSql.checkData(0, i, totalNum) - - tdLog.info("===== step 11 =====") - tdSql.waitedQuery("select * from strm_avg", 1, 20) - tdSql.checkData(0, 1, 9.5) - tdSql.checkData(0, 2, 1900) - tdSql.checkData(0, 3, 0) - tdSql.checkData(0, 4, 19) - tdSql.checkData(0, 5, 0) - tdSql.checkData(0, 6, 19) - - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/stream/new.py b/tests/pytest/stream/new.py deleted file mode 100644 index 4a0e47c01ad9f9aac7ed78be0ff4fc93fc0d41ed..0000000000000000000000000000000000000000 --- a/tests/pytest/stream/new.py +++ /dev/null @@ -1,79 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. 
-# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import time -import taos -from util.log import tdLog -from util.cases import tdCases -from util.sql import tdSql - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - def run(self): - rowNum = 200 - tdSql.prepare() - - tdLog.info("=============== step1") - tdSql.execute("create table mt(ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)") - for i in range(5): - tdSql.execute("create table tb%d using mt tags(%d)" % (i, i)) - for j in range(rowNum): - tdSql.execute("insert into tb%d values(now + %ds, %d, %d)" % (i, j, j, j)) - time.sleep(0.1) - - tdLog.info("=============== step2") - tdSql.query("select count(*), count(tbcol), count(tbcol2) from mt interval(10s)") - tdSql.execute("create table st as select count(*), count(tbcol), count(tbcol2) from mt interval(10s)") - - tdLog.info("=============== step3") - start = time.time() - tdSql.waitedQuery("select * from st", 1, 180) - delay = int(time.time() - start) + 80 - v = tdSql.getData(0, 3) - if v >= 51: - tdLog.exit("value is %d, which is larger than 51" % v) - - tdLog.info("=============== step4") - for i in range(5, 10): - tdSql.execute("create table tb%d using mt tags(%d)" % (i, i)) - for j in range(rowNum): - tdSql.execute("insert into tb%d values(now + %ds, %d, %d)" % (i, j, j, j)) - - tdLog.info("=============== step5") - maxValue = 0 - for i in range(delay): - time.sleep(1) - tdSql.query("select * from st order by ts desc") - v = tdSql.getData(0, 3) - if v > maxValue: - maxValue = v - if v > 51: - break - - if maxValue <= 51: - tdLog.exit("value is %d, which is smaller than 51" % maxValue) - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) - - diff --git a/tests/pytest/stream/parser.py b/tests/pytest/stream/parser.py deleted file mode 100644 index 3b231d2b391a8a5a92cb8924134555117c5bfed2..0000000000000000000000000000000000000000 --- a/tests/pytest/stream/parser.py +++ /dev/null @@ -1,182 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. 
-# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import time -import taos -from util.log import tdLog -from util.cases import tdCases -from util.sql import tdSql - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - ''' - def bug2222(self): - tdSql.prepare() - tdSql.execute("create table superreal(ts timestamp, addr binary(5), val float) tags (deviceNo binary(20))") - tdSql.execute("create table real_001 using superreal tags('001')") - tdSql.execute("create table tj_001 as select sum(val) from real_001 interval(1m)") - - t = datetime.datetime.now() - for i in range(60): - ts = t.strftime("%Y-%m-%d %H:%M") - t += datetime.timedelta(minutes=1) - sql = "insert into real_001 values('%s:0%d', '1', %d)" % (ts, 0, i) - for j in range(4): - sql += ",('%s:0%d', '%d', %d)" % (ts, j + 1, j + 1, i) - tdSql.execute(sql) - time.sleep(60 + random.random() * 60 - 30) - ''' - - def tbase300(self): - tdLog.debug("begin tbase300") - - tdSql.prepare() - tdSql.execute("create table mt(ts timestamp, c1 int, c2 int) tags(t1 int)") - tdSql.execute("create table tb1 using mt tags(1)"); - tdSql.execute("create table tb2 using mt tags(2)"); - tdSql.execute("create table strm as select count(*), avg(c1), sum(c2), max(c1), min(c2),first(c1), last(c2) from mt interval(4s) sliding(2s)") - #tdSql.execute("create table strm as select count(*), avg(c1), sum(c2), max(c1), min(c2), first(c1) from mt interval(4s) sliding(2s)") - tdLog.sleep(10) - tdSql.execute("insert into tb2 values(now, 1, 1)"); - tdSql.execute("insert into tb1 values(now, 1, 1)"); - tdLog.sleep(4) - tdSql.query("select * from mt") - tdSql.query("select * from strm") - tdSql.execute("drop table tb1") - - tdSql.waitedQuery("select * from strm", 1, 100) - if tdSql.queryRows < 1 or tdSql.queryRows > 2: - tdLog.exit("rows should be 1 or 2") - - tdSql.execute("drop table tb2") - tdSql.execute("drop table mt") - tdSql.execute("drop table strm") - - def tbase304(self): - tdLog.debug("begin tbase304") - # we cannot reset query cache in server side, as a workaround, - # set super table name to mt304, need to change back to mt later - tdSql.execute("create table mt304 (ts timestamp, c1 int) tags(t1 int, t2 int)") - tdSql.execute("create table tb1 using mt304 tags(1, 1)") - tdSql.execute("create table tb2 using mt304 tags(1, -1)") - time.sleep(0.1) - tdSql.execute("create table strm as select count(*), avg(c1) from mt304 where t2 >= 0 interval(4s) sliding(2s)") - tdSql.execute("insert into tb1 values (now,1)") - tdSql.execute("insert into tb2 values (now,2)") - - tdSql.waitedQuery("select * from strm", 1, 100) - if tdSql.queryRows < 1 or tdSql.queryRows > 2: - tdLog.exit("rows should be 1 or 2") - - tdSql.checkData(0, 1, 1) - tdSql.checkData(0, 2, 1.000000000) - tdSql.execute("alter table mt304 drop tag t2") - tdSql.execute("insert into tb2 values (now,2)") - tdSql.execute("insert into tb1 values (now,1)") - tdSql.query("select * from strm") - tdSql.execute("alter table mt304 add tag t2 int") - tdLog.sleep(1) - tdSql.query("select * from strm") - - def wildcardFilterOnTags(self): - tdLog.debug("begin wildcardFilterOnTag") - tdSql.prepare() - tdSql.execute("create table stb (ts timestamp, c1 int, c2 binary(10)) tags(t1 
binary(10))") - tdSql.execute("create table tb1 using stb tags('a1')") - tdSql.execute("create table tb2 using stb tags('b2')") - tdSql.execute("create table tb3 using stb tags('a3')") - tdSql.execute("create table strm as select count(*), avg(c1), first(c2) from stb where t1 like 'a%' interval(4s) sliding(2s)") - tdSql.query("describe strm") - tdSql.checkRows(4) - - tdLog.sleep(1) - tdSql.execute("insert into tb1 values (now, 0, 'tb1')") - tdLog.sleep(4) - tdSql.execute("insert into tb2 values (now, 2, 'tb2')") - tdLog.sleep(4) - tdSql.execute("insert into tb3 values (now, 0, 'tb3')") - - tdSql.waitedQuery("select * from strm", 4, 60) - tdSql.checkRows(4) - tdSql.checkData(0, 2, 0.000000000) - if tdSql.getData(0, 3) == 'tb2': - tdLog.exit("unexpected value of data03") - if tdSql.getData(1, 3) == 'tb2': - tdLog.exit("unexpected value of data13") - if tdSql.getData(2, 3) == 'tb2': - tdLog.exit("unexpected value of data23") - if tdSql.getData(3, 3) == 'tb2': - tdLog.exit("unexpected value of data33") - - tdLog.info("add table tb4 to see if stream still works correctly") - # The vnode client needs to refresh metadata cache to allow strm calculate tb4's data. - # But the current refreshing frequency is every 10 min - # commented out the case below to save running time - tdSql.execute("create table tb4 using stb tags('a4')") - tdSql.execute("insert into tb4 values(now, 4, 'tb4')") - tdSql.waitedQuery("select * from strm order by ts desc", 6, 60) - tdSql.checkRows(6) - tdSql.checkData(0, 2, 4) - tdSql.checkData(0, 3, "tb4") - - tdLog.info("change tag values to see if stream still works correctly") - tdSql.execute("alter table tb4 set tag t1='b4'") - tdLog.sleep(3) - tdSql.execute("insert into tb1 values (now, 1, 'tb1_a1')") - tdLog.sleep(4) - tdSql.execute("insert into tb4 values (now, -4, 'tb4_b4')") - tdSql.waitedQuery("select * from strm order by ts desc", 8, 100) - tdSql.checkRows(8) - tdSql.checkData(0, 2, 1) - tdSql.checkData(0, 3, "tb1_a1") - - def datatypes(self): - tdLog.debug("begin data types") - tdSql.prepare() - tdSql.execute("create table stb3 (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 binary(15), c6 nchar(15), c7 bool) tags(t1 int, t2 binary(15))") - tdSql.execute("create table tb0 using stb3 tags(0, 'tb0')") - tdSql.execute("create table tb1 using stb3 tags(1, 'tb1')") - tdSql.execute("create table tb2 using stb3 tags(2, 'tb2')") - tdSql.execute("create table tb3 using stb3 tags(3, 'tb3')") - tdSql.execute("create table tb4 using stb3 tags(4, 'tb4')") - - tdSql.execute("create table strm0 as select count(ts), count(c1), max(c2), min(c4), first(c5), last(c6) from stb3 where ts < now + 30s interval(4s) sliding(2s)") - #tdSql.execute("create table strm0 as select count(ts), count(c1), max(c2), min(c4), first(c5) from stb where ts < now + 30s interval(4s) sliding(2s)") - tdLog.sleep(1) - tdSql.execute("insert into tb0 values (now, 0, 0, 0, 0, 'binary0', '涛思0', true) tb1 values (now, 1, 1, 1, 1, 'binary1', '涛思1', false) tb2 values (now, 2, 2, 2, 2, 'binary2', '涛思2', true) tb3 values (now, 3, 3, 3, 3, 'binary3', '涛思3', false) tb4 values (now, 4, 4, 4, 4, 'binary4', '涛思4', true) ") - - tdSql.waitedQuery("select * from strm0 order by ts desc", 2, 120) - tdSql.checkRows(2) - - tdSql.execute("insert into tb0 values (now, 10, 10, 10, 10, 'binary0', '涛思0', true) tb1 values (now, 11, 11, 11, 11, 'binary1', '涛思1', false) tb2 values (now, 12, 12, 12, 12, 'binary2', '涛思2', true) tb3 values (now, 13, 13, 13, 13, 'binary3', '涛思3', false) tb4 values (now, 14, 14, 14, 14, 'binary4', 
'涛思4', true) ") - tdSql.waitedQuery("select * from strm0 order by ts desc", 4, 120) - tdSql.checkRows(4) - - def run(self): - self.tbase300() - self.tbase304() - self.wildcardFilterOnTags() - self.datatypes() - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/stream/showStreamExecTimeisNull.py b/tests/pytest/stream/showStreamExecTimeisNull.py deleted file mode 100644 index 8a2a09cec6f345d62fc821ba58f60f72d563249f..0000000000000000000000000000000000000000 --- a/tests/pytest/stream/showStreamExecTimeisNull.py +++ /dev/null @@ -1,98 +0,0 @@ -# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -from util.log import * -from util.cases import * -from util.sql import * -from util.dnodes import * - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug(f"start to execute {__file__}") - tdSql.init(conn.cursor(), logSql) - - def insertnow(self): - - # timestamp list: - # 0 -> "1970-01-01 08:00:00" | -28800000 -> "1970-01-01 00:00:00" | -946800000000 -> "1940-01-01 00:00:00" - # -631180800000 -> "1950-01-01 00:00:00" - - tsp1 = 0 - tsp2 = -28800000 - tsp3 = -946800000000 - tsp4 = "1969-01-01 00:00:00.000" - - tdSql.execute("insert into tcq1 values (now-11d, 5)") - tdSql.execute(f"insert into tcq1 values ({tsp1}, 4)") - tdSql.execute(f"insert into tcq1 values ({tsp2}, 3)") - tdSql.execute(f"insert into tcq1 values ('{tsp4}', 2)") - tdSql.execute(f"insert into tcq1 values ({tsp3}, 1)") - - def waitedQuery(self, sql, expectRows, timeout): - tdLog.info(f"sql: {sql}, try to retrieve {expectRows} rows in {timeout} seconds") - try: - for i in range(timeout): - tdSql.cursor.execute(sql) - self.queryResult = tdSql.cursor.fetchall() - self.queryRows = len(self.queryResult) - self.queryCols = len(tdSql.cursor.description) - # tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows)) - if self.queryRows >= expectRows: - return (self.queryRows, i) - time.sleep(1) - except Exception as e: - caller = inspect.getframeinfo(inspect.stack()[1][0]) - tdLog.notice(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, {repr(e)}") - raise Exception(repr(e)) - return (self.queryRows, timeout) - - def showstream(self): - tdSql.execute( - "create table cq1 as select avg(c1) from tcq1 interval(10d) sliding(1d)" - ) - sql = "show streams" - timeout = 30 - exception = "ValueError('year -292275055 is out of range')" - try: - for i in range(timeout): - tdSql.cursor.execute(sql) - self.queryResult = tdSql.cursor.fetchall() - self.queryRows = len(self.queryResult) - self.queryCols = len(tdSql.cursor.description) - # tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows)) - if self.queryRows >= 1: - tdSql.query(sql) - tdSql.checkData(0, 5, None) - return (self.queryRows, i) - time.sleep(1) - except Exception as e: - tdLog.exit(f"sql: {sql} except raise {exception}, actually raise {repr(e)} ") - # else: - # tdLog.exit(f"sql: {sql} except raise {exception}, actually not") - - def run(self): - tdSql.execute("drop database if exists dbcq") - tdSql.execute("create database if not exists dbcq keep 36500") - tdSql.execute("use dbcq") - - 
tdSql.execute("create table stbcq (ts timestamp, c1 int ) TAGS(t1 int)") - tdSql.execute("create table tcq1 using stbcq tags(1)") - - self.insertnow() - self.showstream() - - - def stop(self): - tdSql.close() - tdLog.success(f"{__file__} successfully executed") - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/stream/stream1.py b/tests/pytest/stream/stream1.py deleted file mode 100644 index c657379441e6da3137e3a1ceb8148ba9fa5ba9a5..0000000000000000000000000000000000000000 --- a/tests/pytest/stream/stream1.py +++ /dev/null @@ -1,142 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. -# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import time -import taos -from util.log import tdLog -from util.cases import tdCases -from util.sql import tdSql - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - def run(self): - tbNum = 10 - rowNum = 20 - - tdSql.prepare() - - tdLog.info("===== step1 =====") - tdSql.execute( - "create table stb0(ts timestamp, col1 int, col2 float) tags(tgcol int)") - for i in range(tbNum): - tdSql.execute("create table tb%d using stb0 tags(%d)" % (i, i)) - for j in range(rowNum): - tdSql.execute( - "insert into tb%d values (now - %dm, %d, %d)" % - (i, 1440 - j, j, j)) - time.sleep(0.1) - - tdLog.info("===== step2 =====") - tdSql.query( - "select count(*), count(col1), count(col2) from tb0 interval(1d)") - tdSql.checkData(0, 1, rowNum) - tdSql.checkData(0, 2, rowNum) - tdSql.checkData(0, 3, rowNum) - tdSql.query("show tables") - tdSql.checkRows(tbNum) - tdSql.execute( - "create table s0 as select count(*), count(col1), count(col2) from tb0 interval(1d)") - tdSql.query("show tables") - tdSql.checkRows(tbNum + 1) - - tdLog.info("===== step3 =====") - tdSql.waitedQuery("select * from s0", 1, 120) - try: - tdSql.checkData(0, 1, rowNum) - tdSql.checkData(0, 2, rowNum) - tdSql.checkData(0, 3, rowNum) - except Exception as e: - tdLog.info(repr(e)) - - tdLog.info("===== step4 =====") - tdSql.execute("drop table s0") - tdSql.query("show tables") - tdSql.checkRows(tbNum) - - tdLog.info("===== step5 =====") - tdSql.error("select * from s0") - - tdLog.info("===== step6 =====") - time.sleep(0.1) - tdSql.execute( - "create table s0 as select count(*), count(col1), count(col2) from tb0 interval(1d)") - tdSql.query("show tables") - tdSql.checkRows(tbNum + 1) - - tdLog.info("===== step7 =====") - tdSql.waitedQuery("select * from s0", 1, 120) - try: - tdSql.checkData(0, 1, rowNum) - tdSql.checkData(0, 2, rowNum) - tdSql.checkData(0, 3, rowNum) - except Exception as e: - tdLog.info(repr(e)) - - tdLog.info("===== step8 =====") - tdSql.query( - "select count(*), count(col1), count(col2) from stb0 interval(1d)") - tdSql.checkData(0, 1, rowNum * tbNum) - tdSql.checkData(0, 2, rowNum * tbNum) - tdSql.checkData(0, 3, rowNum * tbNum) - tdSql.query("show tables") - tdSql.checkRows(tbNum + 1) - - tdSql.execute( - "create table s1 as select count(*), count(col1), count(col2) from stb0 interval(1d)") - 
tdSql.query("show tables") - tdSql.checkRows(tbNum + 2) - - tdLog.info("===== step9 =====") - tdSql.waitedQuery("select * from s1", 1, 120) - try: - tdSql.checkData(0, 1, rowNum * tbNum) - tdSql.checkData(0, 2, rowNum * tbNum) - tdSql.checkData(0, 3, rowNum * tbNum) - except Exception as e: - tdLog.info(repr(e)) - - tdLog.info("===== step10 =====") - tdSql.execute("drop table s1") - tdSql.query("show tables") - tdSql.checkRows(tbNum + 1) - - tdLog.info("===== step11 =====") - tdSql.error("select * from s1") - - tdLog.info("===== step12 =====") - tdSql.execute( - "create table s1 as select count(*), count(col1), count(col2) from stb0 interval(1d)") - tdSql.query("show tables") - tdSql.checkRows(tbNum + 2) - - tdLog.info("===== step13 =====") - tdSql.waitedQuery("select * from s1", 1, 120) - try: - tdSql.checkData(0, 1, rowNum * tbNum) - tdSql.checkData(0, 2, rowNum * tbNum) - tdSql.checkData(0, 3, rowNum * tbNum) - except Exception as e: - tdLog.info(repr(e)) - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/stream/stream2.py b/tests/pytest/stream/stream2.py deleted file mode 100644 index 9b4eb8725c96f95196f251c55b0b773cd68e9ed5..0000000000000000000000000000000000000000 --- a/tests/pytest/stream/stream2.py +++ /dev/null @@ -1,164 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. -# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import time -import taos -from util.log import tdLog -from util.cases import tdCases -from util.sql import tdSql - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - def run(self): - tbNum = 10 - rowNum = 20 - totalNum = tbNum * rowNum - - tdSql.prepare() - - tdLog.info("===== step1 =====") - tdSql.execute( - "create table stb0(ts timestamp, col1 int, col2 float) tags(tgcol int)") - for i in range(tbNum): - tdSql.execute("create table tb%d using stb0 tags(%d)" % (i, i)) - for j in range(rowNum): - tdSql.execute( - "insert into tb%d values (now - %dm, %d, %d)" % - (i, 1440 - j, j, j)) - time.sleep(0.1) - - tdLog.info("===== step2 =====") - tdSql.query("select count(col1) from tb0 interval(1d)") - tdSql.checkData(0, 1, rowNum) - tdSql.query("show tables") - tdSql.checkRows(tbNum) - tdSql.execute( - "create table s0 as select count(col1) from tb0 interval(1d)") - tdSql.query("show tables") - tdSql.checkRows(tbNum + 1) - - tdLog.info("===== step3 =====") - tdSql.waitedQuery("select * from s0", 1, 120) - try: - tdSql.checkData(0, 1, rowNum) - except Exception as e: - tdLog.info(repr(e)) - - tdLog.info("===== step4 =====") - tdSql.execute("drop table s0") - tdSql.query("show tables") - try: - tdSql.checkRows(tbNum) - except Exception as e: - tdLog.info(repr(e)) - - tdLog.info("===== step5 =====") - tdSql.error("select * from s0") - - tdLog.info("===== step6 =====") - tdSql.execute( - "create table s0 as select count(*), count(col1), count(col2) from tb0 interval(1d)") - tdSql.query("show tables") - try: - 
tdSql.checkRows(tbNum + 1) - except Exception as e: - tdLog.info(repr(e)) - - tdLog.info("===== step7 =====") - tdSql.waitedQuery("select * from s0", 1, 120) - try: - tdSql.checkData(0, 1, rowNum) - tdSql.checkData(0, 2, rowNum) - tdSql.checkData(0, 3, rowNum) - except Exception as e: - tdLog.info(repr(e)) - - - time.sleep(5) - tdSql.query("show streams") - tdSql.checkRows(1) - tdSql.checkData(0, 2, 's0') - - tdLog.info("===== step8 =====") - tdSql.query( - "select count(*), count(col1), count(col2) from stb0 interval(1d)") - try: - tdSql.checkData(0, 1, totalNum) - tdSql.checkData(0, 2, totalNum) - tdSql.checkData(0, 3, totalNum) - except Exception as e: - tdLog.info(repr(e)) - tdSql.query("show tables") - tdSql.checkRows(tbNum + 1) - tdSql.execute( - "create table s1 as select count(*), count(col1), count(col2) from stb0 interval(1d)") - tdSql.query("show tables") - tdSql.checkRows(tbNum + 2) - - tdLog.info("===== step9 =====") - tdSql.waitedQuery("select * from s1", 1, 120) - try: - tdSql.checkData(0, 1, totalNum) - tdSql.checkData(0, 2, totalNum) - tdSql.checkData(0, 3, totalNum) - except Exception as e: - tdLog.info(repr(e)) - - tdLog.info("===== step10 =====") - tdSql.execute("drop table s1") - tdSql.query("show tables") - try: - tdSql.checkRows(tbNum + 1) - except Exception as e: - tdLog.info(repr(e)) - - tdLog.info("===== step11 =====") - tdSql.error("select * from s1") - - tdLog.info("===== step12 =====") - tdSql.execute( - "create table s1 as select count(col1) from stb0 interval(1d)") - tdSql.query("show tables") - try: - tdSql.checkRows(tbNum + 2) - except Exception as e: - tdLog.info(repr(e)) - - tdLog.info("===== step13 =====") - tdSql.waitedQuery("select * from s1", 1, 120) - try: - tdSql.checkData(0, 1, totalNum) - #tdSql.checkData(0, 2, None) - #tdSql.checkData(0, 3, None) - except Exception as e: - tdLog.info(repr(e)) - - time.sleep(5) - tdSql.query("show streams") - tdSql.checkRows(2) - tdSql.checkData(0, 2, 's1') - tdSql.checkData(1, 2, 's0') - - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/stream/stream3.py b/tests/pytest/stream/stream3.py deleted file mode 100644 index 9a5c6c9aeca08bff1c94861255919255eef89100..0000000000000000000000000000000000000000 --- a/tests/pytest/stream/stream3.py +++ /dev/null @@ -1,108 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. 
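The deleted stream1.py/stream2.py cases above (and stream3.py below) all exercise the same legacy 2.x continuous-query pattern: materialize an aggregate into a stream table with create table ... as select ... interval(...), then poll that table until results show up. A minimal standalone sketch of the pattern, assuming only the plain taos Python connector against a 2.x-era server; the database name demo and the helper waited_query are illustrative, mirroring tdSql.waitedQuery() from tests/pytest/util/sql.py:

# Minimal sketch of the legacy continuous-query check these deleted cases
# perform. Targets a 2.x-era server (the `create table ... as select` CQ
# syntax); database and table names are illustrative.
import time

import taos

conn = taos.connect(host="127.0.0.1")
cur = conn.cursor()
cur.execute("create database if not exists demo")
cur.execute("use demo")
cur.execute("create table if not exists tb0 (ts timestamp, col1 int)")
cur.execute("insert into tb0 values (now, 1)")
# The CQ materializes its aggregate into stream table s0.
cur.execute("create table s0 as select count(col1) from tb0 interval(1d)")

def waited_query(sql, expect_rows, timeout_s):
    # Poll until the stream table holds at least `expect_rows` rows,
    # mirroring tdSql.waitedQuery() from tests/pytest/util/sql.py.
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        cur.execute(sql)
        rows = cur.fetchall()
        if len(rows) >= expect_rows:
            return rows
        time.sleep(1)
    raise TimeoutError(sql)

print(waited_query("select * from s0", 1, 120))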
-# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import time -import taos -from util.log import tdLog -from util.cases import tdCases -from util.sql import tdSql - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - def run(self): - ts = 1500000000000 - tbNum = 10 - rowNum = 20 - - tdSql.prepare() - - tdLog.info("===== step1 =====") - tdSql.execute( - "create table stb0(ts timestamp, col1 binary(20), col2 nchar(20)) tags(tgcol int)") - for i in range(tbNum): - tdSql.execute("create table tb%d using stb0 tags(%d)" % (i, i)) - for j in range(rowNum): - tdSql.execute( - "insert into tb%d values (%d, 'binary%d', 'nchar%d')" % - (i, ts + 60000 * j, j, j)) - tdSql.execute("insert into tb0 values(%d, null, null)" % (ts + 10000000)) - time.sleep(0.1) - - tdLog.info("===== step2 =====") - tdSql.query( - "select count(*), count(col1), count(col2) from stb0 interval(1d)") - tdSql.checkData(0, 1, rowNum * tbNum + 1) - tdSql.checkData(0, 2, rowNum * tbNum) - tdSql.checkData(0, 3, rowNum * tbNum) - - tdSql.query("show tables") - tdSql.checkRows(tbNum) - tdSql.execute( - "create table s0 as select count(*), count(col1), count(col2) from stb0 interval(1d)") - tdSql.query("show tables") - tdSql.checkRows(tbNum + 1) - - tdLog.info("===== step3 =====") - tdSql.waitedQuery("select * from s0", 1, 120) - try: - tdSql.checkData(0, 1, rowNum * tbNum + 1) - tdSql.checkData(0, 2, rowNum * tbNum) - tdSql.checkData(0, 3, rowNum * tbNum) - except Exception as e: - tdLog.info(repr(e)) - - tdLog.info("===== step4 =====") - tdSql.execute("drop table s0") - tdSql.query("show tables") - tdSql.checkRows(tbNum) - - tdLog.info("===== step5 =====") - tdSql.error("select * from s0") - - tdLog.info("===== step6 =====") - time.sleep(0.1) - tdSql.execute( - "create table s0 as select count(*), count(col1), count(col2) from tb0 interval(1d)") - tdSql.query("show tables") - tdSql.checkRows(tbNum + 1) - - tdLog.info("===== step7 =====") - tdSql.waitedQuery("select * from s0", 1, 120) - try: - tdSql.checkData(0, 1, rowNum + 1) - tdSql.checkData(0, 2, rowNum) - tdSql.checkData(0, 3, rowNum) - except Exception as e: - tdLog.info(repr(e)) - - tdLog.info("===== step8 =====") - tdSql.query( - "select count(*), count(col1), count(col2) from stb0 interval(1d)") - tdSql.checkData(0, 1, rowNum * tbNum + 1) - tdSql.checkData(0, 2, rowNum * tbNum) - tdSql.checkData(0, 3, rowNum * tbNum) - tdSql.query("show tables") - tdSql.checkRows(tbNum + 1) - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/stream/sys.py b/tests/pytest/stream/sys.py deleted file mode 100644 index c9a3fccfe68b61da722dcdb2ccab63bf3d5bcabc..0000000000000000000000000000000000000000 --- a/tests/pytest/stream/sys.py +++ /dev/null @@ -1,62 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. 
-# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# migrated from 'stream_on_sys.sim' -# -*- coding: utf-8 -*- -import sys -import time -import taos -from util.log import tdLog -from util.cases import tdCases -from util.sql import tdSql - - -class TDTestCase: - updatecfgDict = {'monitor': 1} - - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - - def run(self): - time.sleep(5) - tdSql.execute("use log") - - tdSql.execute("create table cpustrm as select count(*), avg(cpu_taosd), max(cpu_taosd), min(cpu_taosd), avg(cpu_system), max(cpu_cores), min(cpu_cores), last(cpu_cores) from log.dn1 interval(4s)") - tdSql.execute("create table memstrm as select count(*), avg(mem_taosd), max(mem_taosd), min(mem_taosd), avg(mem_system), first(mem_total), last(mem_total) from log.dn1 interval(4s)") - tdSql.execute("create table diskstrm as select count(*), avg(disk_used), last(disk_used), avg(disk_total), first(disk_total) from log.dn1 interval(4s)") - tdSql.execute("create table bandstrm as select count(*), avg(band_speed), last(band_speed) from log.dn1 interval(4s)") - tdSql.execute("create table reqstrm as select count(*), avg(req_http), last(req_http), avg(req_select), last(req_select), avg(req_insert), last(req_insert) from log.dn1 interval(4s)") - tdSql.execute("create table iostrm as select count(*), avg(io_read), last(io_read), avg(io_write), last(io_write) from log.dn1 interval(4s)") - - sqls = [ - "select * from cpustrm", - "select * from memstrm", - "select * from diskstrm", - "select * from bandstrm", - "select * from reqstrm", - "select * from iostrm", - ] - for sql in sqls: - (rows, _) = tdSql.waitedQuery(sql, 1, 240) - if rows < 1: - tdLog.exit("failed: sql:%s, expect at least one row" % sql) - - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) - diff --git a/tests/pytest/stream/table_1.py b/tests/pytest/stream/table_1.py deleted file mode 100644 index b205491fad181a51c991c16da65baa8370174e74..0000000000000000000000000000000000000000 --- a/tests/pytest/stream/table_1.py +++ /dev/null @@ -1,89 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. 
-# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import time -import taos -from util.log import tdLog -from util.cases import tdCases -from util.sql import tdSql - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - def createFuncStream(self, expr, suffix, value): - tbname = "strm_" + suffix - tdLog.info("create stream table %s" % tbname) - tdSql.query("select %s from tb1 interval(1d)" % expr) - tdSql.checkData(0, 1, value) - tdSql.execute("create table %s as select %s from tb1 interval(1d)" % (tbname, expr)) - - def checkStreamData(self, suffix, value): - sql = "select * from strm_" + suffix - tdSql.waitedQuery(sql, 1, 120) - tdSql.checkData(0, 1, value) - - def run(self): - tbNum = 10 - rowNum = 20 - - tdSql.prepare() - - tdLog.info("===== step1 =====") - tdSql.execute( - "create table stb(ts timestamp, tbcol int, tbcol2 float) tags(tgcol int)") - for i in range(tbNum): - tdSql.execute("create table tb%d using stb tags(%d)" % (i, i)) - for j in range(rowNum): - tdSql.execute( - "insert into tb%d values (now - %dm, %d, %d)" % - (i, 1440 - j, j, j)) - time.sleep(1) - - self.createFuncStream("count(*)", "c1", rowNum) - self.createFuncStream("count(tbcol)", "c2", rowNum) - self.createFuncStream("count(tbcol2)", "c3", rowNum) - self.createFuncStream("avg(tbcol)", "av", 9.5) - self.createFuncStream("sum(tbcol)", "su", 190) - self.createFuncStream("min(tbcol)", "mi", 0) - self.createFuncStream("max(tbcol)", "ma", 19) - self.createFuncStream("first(tbcol)", "fi", 0) - self.createFuncStream("last(tbcol)", "la", 19) - self.createFuncStream("stddev(tbcol)", "st", 5.766281297335398) - self.createFuncStream("percentile(tbcol, 1)", "pe", 0.19) - self.createFuncStream("count(tbcol)", "as", rowNum) - - self.checkStreamData("c1", rowNum) - self.checkStreamData("c2", rowNum) - self.checkStreamData("c3", rowNum) - self.checkStreamData("av", 9.5) - self.checkStreamData("su", 190) - self.checkStreamData("mi", 0) - self.checkStreamData("ma", 19) - self.checkStreamData("fi", 0) - self.checkStreamData("la", 19) - self.checkStreamData("st", 5.766281297335398) - self.checkStreamData("pe", 0.19) - self.checkStreamData("as", rowNum) - - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/stream/table_n.py b/tests/pytest/stream/table_n.py deleted file mode 100644 index 371af769778bce1eb1e6cf1bac89333006c582a8..0000000000000000000000000000000000000000 --- a/tests/pytest/stream/table_n.py +++ /dev/null @@ -1,143 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. 
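The deleted table_1.py above repeats the same create-then-verify pair for a dozen aggregate expressions that differ only in expression, stream-table suffix, and expected value, so the coverage could be expressed table-driven. A hedged sketch of that refactor as a TDTestCase method, reusing the file's own createFuncStream()/checkStreamData() helpers; expressions and expected values are copied from the case (rowNum = 20), while the CASES table and the runStreams name are hypothetical:

# Data-driven variant of the createFuncStream()/checkStreamData() calls in
# the deleted table_1.py; values copied from that case (rowNum = 20).
CASES = [
    ("count(*)",             "c1", 20),
    ("count(tbcol)",         "c2", 20),
    ("avg(tbcol)",           "av", 9.5),
    ("sum(tbcol)",           "su", 190),
    ("min(tbcol)",           "mi", 0),
    ("max(tbcol)",           "ma", 19),
    ("first(tbcol)",         "fi", 0),
    ("last(tbcol)",          "la", 19),
    ("stddev(tbcol)",        "st", 5.766281297335398),
    ("percentile(tbcol, 1)", "pe", 0.19),
]

def runStreams(self):
    # Create all stream tables first, then verify each one.
    for expr, suffix, value in CASES:
        self.createFuncStream(expr, suffix, value)
    for _, suffix, value in CASES:
        self.checkStreamData(suffix, value)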
-# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import time -import taos -from util.log import tdLog -from util.cases import tdCases -from util.sql import tdSql - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - def run(self): - tbNum = 10 - rowNum = 20 - - tdSql.prepare() - - tdLog.info("===== preparing data =====") - tdSql.execute( - "create table stb(ts timestamp, tbcol int, tbcol2 float) tags(tgcol int)") - for i in range(tbNum): - tdSql.execute("create table tb%d using stb tags(%d)" % (i, i)) - for j in range(rowNum): - tdSql.execute( - "insert into tb%d values (now - %dm, %d, %d)" % - (i, 1440 - j, j, j)) - time.sleep(0.1) - - tdLog.info("===== step 1 =====") - tdSql.query("select count(*), count(tbcol), count(tbcol2) from tb1 interval(1d)") - tdSql.checkData(0, 1, rowNum) - tdSql.checkData(0, 2, rowNum) - tdSql.checkData(0, 3, rowNum) - - tdLog.info("===== step 2 =====") - tdSql.execute("create table strm_c3 as select count(*), count(tbcol), count(tbcol2) from tb1 interval(1d)") - - tdLog.info("===== step 3 =====") - tdSql.execute("create table strm_c32 as select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from tb1 interval(1d)") - - tdLog.info("===== step 4 =====") - tdSql.query("select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from tb1 interval(1d)") - tdSql.checkData(0, 1, rowNum) - tdSql.checkData(0, 2, rowNum) - tdSql.checkData(0, 3, rowNum) - - tdLog.info("===== step 5 =====") - tdSql.execute("create table strm_c31 as select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as 
c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from tb1 interval(1d)") - - tdLog.info("===== step 6 =====") - tdSql.query("select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from tb1 interval(1d)") - tdSql.checkData(0, 1, 9.5) - tdSql.checkData(0, 2, 190) - tdSql.checkData(0, 3, 0) - tdSql.checkData(0, 4, 19) - tdSql.checkData(0, 5, 0) - tdSql.checkData(0, 6, 19) - tdSql.execute("create table strm_avg as select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from tb1 interval(1d)") - - tdLog.info("===== step 7 =====") - tdSql.query("select stddev(tbcol), leastsquares(tbcol, 1, 1), percentile(tbcol, 1) from tb1 interval(1d)") - tdSql.checkData(0, 1, 5.766281297335398) - tdSql.checkData(0, 3, 0.19) - tdSql.execute("create table strm_ot as select stddev(tbcol), leastsquares(tbcol, 1, 1), percentile(tbcol, 1) from tb1 interval(1d)") - - tdLog.info("===== step 8 =====") - tdSql.query("select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), stddev(tbcol), percentile(tbcol, 1), count(tbcol), leastsquares(tbcol, 1, 1) from tb1 interval(1d)") - tdSql.checkData(0, 1, 9.5) - tdSql.checkData(0, 2, 190) - tdSql.checkData(0, 3, 0) - tdSql.checkData(0, 4, 19) - tdSql.checkData(0, 5, 0) - tdSql.checkData(0, 6, 19) - tdSql.checkData(0, 7, 5.766281297335398) - tdSql.checkData(0, 8, 0.19) - tdSql.checkData(0, 9, rowNum) - tdSql.execute("create table strm_to as select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), stddev(tbcol), percentile(tbcol, 1), count(tbcol), leastsquares(tbcol, 1, 1) from tb1 interval(1d)") - - tdLog.info("===== step 9 =====") - tdSql.query("select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), stddev(tbcol), percentile(tbcol, 1), count(tbcol), leastsquares(tbcol, 1, 1) from tb1 where ts < now + 4m interval(1d)") - tdSql.checkData(0, 9, rowNum) - tdSql.execute("create table strm_wh as select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), stddev(tbcol), percentile(tbcol, 1), count(tbcol), leastsquares(tbcol, 1, 1) from tb1 where ts < now + 4m interval(1d)") - - tdLog.info("===== step 10 =====") - tdSql.waitedQuery("select * from strm_c3", 1, 120) - tdSql.checkData(0, 1, rowNum) - tdSql.checkData(0, 2, rowNum) - tdSql.checkData(0, 3, rowNum) - - tdLog.info("===== step 11 =====") - tdSql.waitedQuery("select * from strm_c31", 1, 30) - for i in range(1, 10): - tdSql.checkData(0, i, rowNum) - - tdLog.info("===== step 12 =====") - tdSql.waitedQuery("select * from strm_avg", 1, 20) - tdSql.checkData(0, 1, 9.5) - tdSql.checkData(0, 2, 190) - tdSql.checkData(0, 3, 0) - tdSql.checkData(0, 4, 19) - tdSql.checkData(0, 5, 0) - tdSql.checkData(0, 6, 19) - - tdLog.info("===== step 13 =====") - tdSql.waitedQuery("select * from strm_ot", 1, 20) - tdSql.checkData(0, 1, 5.766281297335398) - tdSql.checkData(0, 3, 0.19) - - tdLog.info("===== step 14 =====") - tdSql.waitedQuery("select * from strm_to", 1, 20) - tdSql.checkData(0, 1, 9.5) - tdSql.checkData(0, 2, 190) - tdSql.checkData(0, 3, 0) - tdSql.checkData(0, 4, 19) - tdSql.checkData(0, 5, 0) - tdSql.checkData(0, 6, 19) - tdSql.checkData(0, 7, 5.766281297335398) - tdSql.checkData(0, 8, 0.19) - tdSql.checkData(0, 9, rowNum) - - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/stream/test1.py 
b/tests/pytest/stream/test1.py new file mode 100644 index 0000000000000000000000000000000000000000..d3439a7bdbbf258795a15164eb63b9278549ed8a --- /dev/null +++ b/tests/pytest/stream/test1.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + tdSql.execute('drop database if exists slmfvojuxt;') + tdSql.execute('create database if not exists slmfvojuxt vgroups 1;') + tdSql.execute('use slmfvojuxt;') + tdSql.execute('create table if not exists downsampling_stb (ts timestamp, c1 int, c2 double, c3 varchar(100), c4 bool) tags (t1 int, t2 double, t3 varchar(100), t4 bool);') + tdSql.execute('create table ownsampling_ct1 using downsampling_stb tags(10, 10.1, "beijing", True);') + tdSql.execute('create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20)) tags (t1 int);') + tdSql.execute('create table scalar_ct1 using scalar_stb tags(10);') + tdSql.execute('create stream downsampling_stream into output_downsampling_stb as select _wstartts AS start, min(c1), max(c2), sum(c1) from downsampling_stb interval(10m);') + tdSql.execute('create stream scalar_stream into output_scalar_stb as select ts, abs(c1) a1 , abs(c2) a2 from scalar_stb;') + tdSql.execute('insert into scalar_ct1 values (1653471881952, 100, 100.1, "beijing");') + tdSql.execute('insert into scalar_ct1 values (1653471881952+1s, -50, -50.1, "tianjin");') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/stream/test2.py b/tests/pytest/stream/test2.py new file mode 100644 index 0000000000000000000000000000000000000000..a441174722047d7fb7819f535fe7b6c7bf55380f --- /dev/null +++ b/tests/pytest/stream/test2.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.common import tdCom +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + #for i in range(100): + tdSql.prepare() + dbname = tdCom.getLongName(10, "letters") + tdSql.execute('show databases') + tdSql.execute('drop database if exists ttxkbrzmpo') + tdSql.execute('create database if not exists ttxkbrzmpo vgroups 1') + tdSql.execute('use ttxkbrzmpo') + tdSql.execute('create table if not exists downsampling_stb (ts timestamp, c1 int, c2 double, c3 varchar(100), c4 bool) tags (t1 int, t2 double, t3 varchar(100), t4 bool);') + tdSql.execute('create table downsampling_ct1 using downsampling_stb tags(10, 10.1, "Beijing", True);') + tdSql.execute('create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 nchar(20), c5 nchar(20)) tags (t1 int);') + tdSql.execute('create table scalar_ct1 using scalar_stb tags(10);') + tdSql.execute('create stream downsampling_stream into output_downsampling_stb as select _wstartts AS start, min(c1), max(c2), sum(c1) from downsampling_stb interval(10m);') + tdSql.execute('insert into downsampling_ct1 values (1653547828591, 100, 100.1, "Beijing", True);') + tdSql.execute('insert into downsampling_ct1 values (1653547828591+1s, -100, -100.1, "Tianjin", False);') + tdSql.execute('insert into downsampling_ct1 values 
(1653547828591+2s, 50, 50.3, "HeBei", False);') + tdSql.execute('select * from output_downsampling_stb;') + tdSql.execute('select start, `min(c1)`, `max(c2)`, `sum(c1)` from output_downsampling_stb;') + tdSql.execute('select _wstartts AS start, min(c1), max(c2), sum(c1) from downsampling_stb interval(10m);') + tdSql.execute('insert into downsampling_ct1 values (1653547828591+10m, 60, 60.3, "heilongjiang", True);') + tdSql.execute('insert into downsampling_ct1 values (1653547828591+11m, 70, 70.3, "JiLin", True);') + tdSql.execute('select * from output_downsampling_stb;') + tdSql.execute('select start, `min(c1)`, `max(c2)`, `sum(c1)` from output_downsampling_stb;') + tdSql.execute('select _wstartts AS start, min(c1), max(c2), sum(c1) from downsampling_stb interval(10m);') + tdSql.execute('insert into downsampling_ct1 values (1653547828591+21m, 70, 70.3, "JiLin", True);') + tdSql.execute('select * from output_downsampling_stb;') + tdSql.execute('select * from output_downsampling_stb;') + tdSql.execute('select start, `min(c1)`, `max(c2)`, `sum(c1)` from output_downsampling_stb;') + tdSql.execute('select _wstartts AS start, min(c1), max(c2), sum(c1) from downsampling_stb interval(10m);') + tdSql.execute('create stream abs_stream into output_abs_stb as select ts, abs(c1), abs(c2), c3 from scalar_stb;') + tdSql.query('describe output_abs_stb') + tdSql.execute('create stream acos_stream into output_acos_stb as select ts, acos(c1), acos(c2), c3 from scalar_stb;') + tdSql.query('describe output_acos_stb') + tdSql.execute('create stream asin_stream into output_asin_stb as select ts, asin(c1), asin(c2), c3 from scalar_stb;') + tdSql.query('describe output_asin_stb') + tdSql.execute('create stream atan_stream into output_atan_stb as select ts, atan(c1), atan(c2), c3 from scalar_stb;') + tdSql.query('describe output_atan_stb') + tdSql.execute('create stream ceil_stream into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb;') + tdSql.query('describe output_ceil_stb') + tdSql.execute('create stream cos_stream into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb;') + tdSql.query('describe output_cos_stb') + tdSql.execute('create stream floor_stream into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb;') + tdSql.query('describe output_floor_stb') + tdSql.execute('create stream log_stream into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb;') + tdSql.query('describe output_log_stb') + tdSql.execute('create stream pow_stream into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb;') + tdSql.query('describe output_pow_stb') + tdSql.execute('create stream round_stream into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb;') + tdSql.query('describe output_round_stb') + tdSql.execute('create stream sin_stream into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb;') + tdSql.query('describe output_sin_stb') + tdSql.execute('create stream sqrt_stream into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb;') + tdSql.query('describe output_sqrt_stb') + tdSql.execute('create stream tan_stream into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb;') + tdSql.query('describe output_tan_stb') + tdSql.execute('create stream char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb;') + tdSql.query('describe output_char_length_stb') + tdSql.execute('create stream concat_stream 
into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb;') + tdSql.execute('create stream concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb;') + tdSql.execute('create stream length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb;') + tdSql.query('describe output_length_stb') + tdSql.execute('create stream lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb;') + tdSql.query('describe output_lower_stb') + tdSql.execute('create stream ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb;') + tdSql.query('describe output_ltrim_stb') + tdSql.execute('create stream rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb;') + tdSql.query('describe output_rtrim_stb') + tdSql.execute('create stream substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb;') + tdSql.query('describe output_substr_stb') + tdSql.execute('create stream upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb;') + tdSql.query('describe output_upper_stb') + tdSql.execute('insert into scalar_ct1 values (1653560440733, 100, 100.1, "beijing", "taos", "Taos");') + tdSql.execute('insert into scalar_ct1 values (1653560440733+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata");') + tdSql.execute('insert into scalar_ct1 values (1653560440733+2s, 0, Null, "hebei", "TDengine", Null);') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/stream/test3.py b/tests/pytest/stream/test3.py new file mode 100644 index 0000000000000000000000000000000000000000..b45521a9476961394c1cf4b2454d6fb9e2368c68 --- /dev/null +++ b/tests/pytest/stream/test3.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.common import tdCom +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + #for i in range(100): + tdSql.prepare() + dbname = tdCom.getLongName(10, "letters") + tdSql.execute('create database if not exists djnhawvlgq vgroups 1') + tdSql.execute('use djnhawvlgq') + tdSql.execute('create table if not exists downsampling_stb (ts timestamp, c1 int, c2 double, c3 varchar(100), c4 bool) tags (t1 int, t2 double, t3 varchar(100), t4 bool);') + tdSql.execute('create table downsampling_ct1 using downsampling_stb tags(10, 10.1, "Beijing", True);') + tdSql.execute('create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 nchar(20), c5 nchar(20)) tags (t1 int);') + tdSql.execute('create table scalar_ct1 using scalar_stb tags(10);') + tdSql.execute('create table if not exists data_filter_stb (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(100), t8 nchar(200), t9 bool, t10 tinyint 
unsigned, t11 smallint unsigned, t12 int unsigned, t13 bigint unsigned)') + tdSql.execute('create table if not exists data_filter_ct1 using data_filter_stb tags (1, 2, 3, 4, 5.5, 6.6, "binary7", "nchar8", true, 11, 12, 13, 14)') + tdSql.execute('create stream data_filter_stream into output_data_filter_stb as select * from data_filter_stb where ts >= 1653648072973+1s and c1 = 1 or c2 > 1 and c3 != 4 or c4 <= 3 and c5 <> 0 or c6 is not Null or c7 is Null or c8 between "na" and "nchar4" and c8 not between "bi" and "binary" and c8 match "nchar[19]" and c8 nmatch "nchar[25]" or c9 in (1, 2, 3) or c10 not in (6, 7) and c8 like "nch%" and c7 not like "bina_" and c11 <= 10 or c12 is Null or c13 >= 4;') + tdSql.execute('insert into data_filter_ct1 values (1653648072973, 1, 1, 1, 3, 1.1, 1.1, "binary1", "nchar1", true, 1, 2, 3, 4);') + tdSql.execute('insert into data_filter_ct1 values (1653648072973+1s, 2, 2, 1, 3, 1.1, 1.1, "binary2", "nchar2", true, 2, 3, 4, 5);') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/test-all.bat b/tests/pytest/test-all.bat new file mode 100644 index 0000000000000000000000000000000000000000..4e3ece9b565f5fecce55798684d98875e1ffb7cc --- /dev/null +++ b/tests/pytest/test-all.bat @@ -0,0 +1,27 @@ +@echo off +SETLOCAL EnableDelayedExpansion +for /F "tokens=1,2 delims=#" %%a in ('"prompt #$H#$E# & echo on & for %%b in (1) do rem"') do ( set "DEL=%%a") +set /a a=0 +@REM echo Windows Taosd Test +@REM for /F "usebackq tokens=*" %%i in (fulltest.bat) do ( +@REM echo Processing %%i +@REM set /a a+=1 +@REM call %%i ARG1 -w -m localhost > result_!a!.txt 2>error_!a!.txt +@REM if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && exit 8 ) else ( call :colorEcho 0a "Success" &echo. ) +@REM ) +echo Linux Taosd Test +for /F "usebackq tokens=*" %%i in (fulltest.bat) do ( + echo Processing %%i + set /a a+=1 + call %%i ARG1 -w 1 -m %1 > result_!a!.txt 2>error_!a!.txt + if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && goto :end ) else ( call :colorEcho 0a "Success" &echo. 
) +) +goto :end + +:colorEcho +echo off +<nul set /p ".=%DEL%" > "%~2" +findstr /v /a:%1 /R "^$" "%~2" nul +del "%~2" > nul 2>&1 + +:end \ No newline at end of file diff --git a/tests/pytest/test.py b/tests/pytest/test.py index 97dca6be1811ee87a31661e018616f469d5fd4ca..30ab6ae3cc14e2d36f4979f03bdc99871cfcd8fa 100644 --- a/tests/pytest/test.py +++ b/tests/pytest/test.py @@ -18,6 +18,7 @@ import getopt import subprocess import time from distutils.log import warn as printf +import platform from util.log import * from util.dnodes import * @@ -35,8 +36,11 @@ if __name__ == "__main__": logSql = True stop = 0 restart = False + windows = 0 + if platform.system().lower() == 'windows': + windows = 1 opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghr', [ - 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help']) + 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart']) for key, value in opts: if key in ['-h', '--help']: tdLog.printNoPrefix( @@ -61,7 +65,7 @@ if __name__ == "__main__": deployPath = value if key in ['-m', '--master']: - masterIp = value + masterIp = value if key in ['-l', '--logSql']: if (value.upper() == "TRUE"): @@ -110,67 +114,105 @@ if __name__ == "__main__": time.sleep(2) tdLog.info('stop All dnodes') - - tdDnodes.init(deployPath) - tdDnodes.setTestCluster(testCluster) - tdDnodes.setValgrind(valgrind) - tdDnodes.stopAll() - is_test_framework = 0 - key_word = 'tdCases.addLinux' - try: - if key_word in open(fileName).read(): - is_test_framework = 1 - except: - pass - if is_test_framework: - moduleName = fileName.replace(".py", "").replace("/", ".") - uModule = importlib.import_module(moduleName) - try: - ucase = uModule.TDTestCase() - tdDnodes.deploy(1,ucase.updatecfgDict) - except : - tdDnodes.deploy(1,{}) - else: - tdDnodes.deploy(1,{}) - tdDnodes.start(1) if masterIp == "": host = '127.0.0.1' else: host = masterIp - tdLog.info("Procedures for tdengine deployed in %s" % (host)) - - tdCases.logSql(logSql) - - if testCluster: - tdLog.info("Procedures for testing cluster") - if fileName == "all": - tdCases.runAllCluster() - else: - tdCases.runOneCluster(fileName) - else: + if (windows): + tdCases.logSql(logSql) tdLog.info("Procedures for testing self-deployment") + if masterIp == "" or masterIp == "localhost": + tdDnodes.init(deployPath) + tdDnodes.setTestCluster(testCluster) + tdDnodes.setValgrind(valgrind) + tdDnodes.stopAll() + is_test_framework = 0 + key_word = 'tdCases.addWindows' + try: + if key_word in open(fileName).read(): 
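The reworked test.py here branches on platform.system() and greps the case file for the per-platform registration marker before importing it as a module. A condensed sketch of that dispatch (load_case_module is a hypothetical name; the markers and the module-path conversion follow the diff):

# Condensed sketch of the per-platform case loading the reworked test.py
# performs: pick the marker the driver greps for, then import by dotted path.
import importlib
import os
import platform

def load_case_module(file_name):
    key_word = ('tdCases.addWindows'
                if platform.system().lower() == 'windows'
                else 'tdCases.addLinux')
    with open(file_name) as f:
        if key_word not in f.read():
            return None  # not a framework test case
    module_name = file_name.replace(".py", "").replace(os.sep, ".")
    return importlib.import_module(module_name)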
is_test_framework = 1 + except: + pass + if is_test_framework: + moduleName = fileName.replace(".py", "").replace("/", ".") + uModule = importlib.import_module(moduleName) + try: + ucase = uModule.TDTestCase() + tdDnodes.deploy(1,ucase.updatecfgDict) + except : + tdDnodes.deploy(1,{}) + else: + tdDnodes.deploy(1,{}) + tdDnodes.start(1) + + tdLog.info("Procedures for tdengine deployed in %s" % (host)) + + tdCases.logSql(logSql) + + if testCluster: + tdLog.info("Procedures for testing cluster") + if fileName == "all": + tdCases.runAllCluster() + else: + tdCases.runOneCluster(fileName) else: - tdCases.runOneLinux(conn, fileName) - if restart: - if fileName == "all": - tdLog.info("not need to query ") - else: - sp = fileName.rsplit(".", 1) - if len(sp) == 2 and sp[1] == "py": - tdDnodes.stopAll() - tdDnodes.start(1) - time.sleep(1) - conn = taos.connect( host, config=tdDnodes.getSimCfgPath()) - tdLog.info("Procedures for tdengine deployed in %s" % (host)) - tdLog.info("query test after taosd restart") - tdCases.runOneLinux(conn, sp[0] + "_" + "restart.py") + tdLog.info("Procedures for testing self-deployment") + conn = taos.connect( + host, + config=tdDnodes.getSimCfgPath()) + if fileName == "all": + tdCases.runAllLinux(conn) else: - tdLog.info("not need to query") + tdCases.runOneLinux(conn, fileName) + if restart: + if fileName == "all": + tdLog.info("not need to query ") + else: + sp = fileName.rsplit(".", 1) + if len(sp) == 2 and sp[1] == "py": + tdDnodes.stopAll() + tdDnodes.start(1) + time.sleep(1) + conn = taos.connect( host, config=tdDnodes.getSimCfgPath()) + tdLog.info("Procedures for tdengine deployed in %s" % (host)) + tdLog.info("query test after taosd restart") + tdCases.runOneLinux(conn, sp[0] + "_" + "restart.py") + else: + tdLog.info("not need to query") conn.close() diff --git a/tests/pytest/util/cases.py b/tests/pytest/util/cases.py index 2fc1ac8515e47f9354483ebb590897eea96dcc57..2bfd8efdcd96979d25b58d7af50bb706d91fd91d 100644 --- a/tests/pytest/util/cases.py +++ b/tests/pytest/util/cases.py @@ -34,7 +34,7 @@ class TDCases: self.clusterCases = [] def __dynamicLoadModule(self, fileName): - moduleName = fileName.replace(".py", "").replace("/", ".") + moduleName = fileName.replace(".py", "").replace(os.sep, ".") return importlib.import_module(moduleName, package='..') def logSql(self, logSql): @@ -101,8 +101,12 @@ class TDCases: for tmp in self.windowsCases: if tmp.name.find(fileName) != -1: case = testModule.TDTestCase() - case.init(conn) - case.run() + case.init(conn, self._logSql) + try: + case.run() + except Exception as e: + tdLog.notice(repr(e)) + tdLog.exit("%s failed" % (fileName)) case.stop() runNum += 1 continue diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index 35abc4802f9de2080a6b6a166daf833c9cf04578..8c791efbc644924cfe4c1d85d6422bb671fd1216 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -14,23 +14,93 @@ import random import string from util.sql import tdSql - +from util.dnodes import tdDnodes +import requests +import time +import socket class TDCom: def init(self, conn, logSql): tdSql.init(conn.cursor(), logSql) - def cleanTb(self): + def preDefine(self): + header = {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='} + sql_url = "http://127.0.0.1:6041/rest/sql" + sqlt_url = "http://127.0.0.1:6041/rest/sqlt" + sqlutc_url = "http://127.0.0.1:6041/rest/sqlutc" + influx_url = "http://127.0.0.1:6041/influxdb/v1/write" + telnet_url = "http://127.0.0.1:6041/opentsdb/v1/put/telnet" + return header, sql_url, sqlt_url, 
sqlutc_url, influx_url, telnet_url + + def genTcpParam(self): + MaxBytes = 1024*1024 + host ='127.0.0.1' + port = 6046 + return MaxBytes, host, port + + def tcpClient(self, input): + MaxBytes = tdCom.genTcpParam()[0] + host = tdCom.genTcpParam()[1] + port = tdCom.genTcpParam()[2] + sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM) + sock.connect((host, port)) + sock.send(input.encode()) + sock.close() + + def restApiPost(self, sql): + requests.post(self.preDefine()[1], sql.encode("utf-8"), headers = self.preDefine()[0]) + + def createDb(self, dbname="test", db_update_tag=0, api_type="taosc"): + if api_type == "taosc": + if db_update_tag == 0: + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname} precision 'us'") + else: + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname} precision 'us' update 1") + elif api_type == "restful": + if db_update_tag == 0: + self.restApiPost(f"drop database if exists {dbname}") + self.restApiPost(f"create database if not exists {dbname} precision 'us'") + else: + self.restApiPost(f"drop database if exists {dbname}") + self.restApiPost(f"create database if not exists {dbname} precision 'us' update 1") + tdSql.execute(f'use {dbname}') + + def genUrl(self, url_type, dbname, precision): + if url_type == "influxdb": + if precision is None: + url = self.preDefine()[4] + "?" + "db=" + dbname + else: + url = self.preDefine()[4] + "?" + "db=" + dbname + "&precision=" + precision + elif url_type == "telnet": + url = self.preDefine()[5] + "/" + dbname + else: + url = self.preDefine()[1] + return url + + def schemalessApiPost(self, sql, url_type="influxdb", dbname="test", precision=None): + if url_type == "influxdb": + url = self.genUrl(url_type, dbname, precision) + elif url_type == "telnet": + url = self.genUrl(url_type, dbname, precision) + res = requests.post(url, sql.encode("utf-8"), headers = self.preDefine()[0]) + return res + + def cleanTb(self, type="taosc"): + ''' + type is taosc or restful + ''' query_sql = "show stables" res_row_list = tdSql.query(query_sql, True) stb_list = map(lambda x: x[0], res_row_list) for stb in stb_list: - tdSql.execute(f'drop table if exists {stb}') + if type == "taosc": + tdSql.execute(f'drop table if exists {stb}') + elif type == "restful": + self.restApiPost(f"drop table if exists {stb}") - query_sql = "show tables" - res_row_list = tdSql.query(query_sql, True) - tb_list = map(lambda x: x[0], res_row_list) - for tb in tb_list: - tdSql.execute(f'drop table if exists {tb}') + def dateToTs(self, datetime_input): + return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f"))) def getLongName(self, len, mode = "mixed"): """ @@ -47,6 +117,52 @@ class TDCom: chars = ''.join(random.choice(string.ascii_letters.lower() + string.digits) for i in range(len)) return chars + def restartTaosd(self, index=1, db_name="db"): + tdDnodes.stop(index) + tdDnodes.startWithoutSleep(index) + tdSql.execute(f"use {db_name}") + + def typeof(self, variate): + v_type=None + if type(variate) is int: + v_type = "int" + elif type(variate) is str: + v_type = "str" + elif type(variate) is float: + v_type = "float" + elif type(variate) is bool: + v_type = "bool" + elif type(variate) is list: + v_type = "list" + elif type(variate) is tuple: + v_type = "tuple" + elif type(variate) is dict: + v_type = "dict" + elif type(variate) is set: + v_type = "set" + return v_type + + def splitNumLetter(self, input_mix_str): + nums, letters = "", "" 
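The new helpers in util/common.py drive taosAdapter's REST endpoints with the default root:taosdata basic-auth token. What restApiPost() does, reduced to direct requests calls, with the endpoint and header values exactly as hard-coded in preDefine():

# Direct-requests equivalent of restApiPost(): POST a SQL statement to
# taosAdapter's /rest/sql endpoint with the default root:taosdata token.
import requests

header = {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='}
sql_url = "http://127.0.0.1:6041/rest/sql"

res = requests.post(sql_url, "show databases".encode("utf-8"), headers=header)
print(res.status_code, res.json())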
+ for i in input_mix_str: + if i.isdigit(): + nums += i + elif i.isspace(): + pass + else: + letters += i + return nums, letters + + def smlPass(self, func): + smlChildTableName = "no" + def wrapper(*args): + # if tdSql.getVariable("smlChildTableName")[0].upper() == "ID": + if smlChildTableName.upper() == "ID": + return func(*args) + else: + pass + return wrapper + def close(self): self.cursor.close() diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 952aca70cf5f825f6c3217e53e73e5291b71d83e..b8cb73cb443da1e238617763f3ca87583906fe57 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -17,6 +17,10 @@ import os.path import platform import subprocess from time import sleep +import base64 +import json +import copy +from fabric2 import Connection from util.log import * @@ -67,17 +71,19 @@ class TDSimClient: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) + os.makedirs(self.logDir) cmd = "rm -rf " + self.cfgDir if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) + os.makedirs(self.cfgDir) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -109,6 +115,7 @@ class TDDnode: self.deployed = 0 self.testCluster = False self.valgrind = 0 + self.remoteIP = "" self.cfgDict = { "walLevel": "2", "fsync": "1000", @@ -135,8 +142,9 @@ class TDDnode: "telemetryReporting": "0" } - def init(self, path): + def init(self, path, remoteIP = ""): self.path = path + self.remoteIP = remoteIP def setTestCluster(self, value): self.testCluster = value @@ -160,6 +168,29 @@ class TDDnode: def addExtraCfg(self, option, value): self.cfgDict.update({option: value}) + def remoteExec(self, updateCfgDict, execCmd): + try: + config = eval(self.remoteIP) + remote_conn = Connection(host=config["host"], port=config["port"], user=config["user"], connect_kwargs={'password':config["password"]}) + remote_top_dir = config["path"] + except Exception as r: + remote_conn = Connection(host=self.remoteIP, port=22, user='root', connect_kwargs={'password':'123456'}) + remote_top_dir = '~/test' + valgrindStr = '' + if (self.valgrind==1): + valgrindStr = '-g' + remoteCfgDict = copy.deepcopy(updateCfgDict) + if ("logDir" in remoteCfgDict): + del remoteCfgDict["logDir"] + if ("dataDir" in remoteCfgDict): + del remoteCfgDict["dataDir"] + if ("cfgDir" in remoteCfgDict): + del remoteCfgDict["cfgDir"] + remoteCfgDictStr = base64.b64encode(json.dumps(remoteCfgDict).encode()).decode() + execCmdStr = base64.b64encode(execCmd.encode()).decode() + with remote_conn.cd((remote_top_dir+sys.path[0].replace(self.path, '')).replace('\\','/')): + remote_conn.run("python3 ./test.py %s -d %s -e %s"%(valgrindStr,remoteCfgDictStr,execCmdStr)) + def deploy(self, *updatecfgDict): self.logDir = "%s/sim/dnode%d/log" % (self.path, self.index) self.dataDir = "%s/sim/dnode%d/data" % (self.path, self.index) @@ -179,17 +210,20 @@ class TDDnode: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.dataDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + # cmd = "mkdir -p " + self.dataDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) + os.makedirs(self.dataDir) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # 
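remoteExec() in util/dnodes.py ships the per-case config dict to the remote test.py by JSON-serializing it and base64-encoding the result so it survives shell quoting on the remote python3 ./test.py -d <encoded> command line. A round-trip sketch of that transport (the config values are illustrative):

# Round trip of the config transport used by remoteExec(): serialize,
# base64-encode for safe CLI transport, decode on the far side.
import base64
import json

cfg = {"walLevel": "2", "fsync": "1000"}  # illustrative values
encoded = base64.b64encode(json.dumps(cfg).encode()).decode()
decoded = json.loads(base64.b64decode(encoded.encode()).decode())
assert decoded == cfg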
tdLog.exit(cmd) + os.makedirs(self.logDir) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) + os.makedirs(self.cfgDir) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -224,8 +258,11 @@ class TDDnode: self.cfg(value, key) else: self.addExtraCfg(key, value) - for key, value in self.cfgDict.items(): - self.cfg(key, value) + if (self.remoteIP == ""): + for key, value in self.cfgDict.items(): + self.cfg(key, value) + else: + self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)"%self.index) self.deployed = 1 tdLog.debug( @@ -242,11 +279,13 @@ class TDDnode: paths = [] for root, dirs, files in os.walk(projPath): - if ((tool) in files): + if ((tool) in files or ("%s.exe"%tool) in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): paths.append(os.path.join(root, tool)) break + if (len(paths) == 0): + return "" return paths[0] def start(self): @@ -261,54 +300,68 @@ class TDDnode: tdLog.exit("dnode:%d is not deployed" % (self.index)) if self.valgrind == 0: - cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( - binPath, self.cfgDir) + if platform.system().lower() == 'windows': + cmd = "mintty -h never -w hide %s -c %s" % ( + binPath, self.cfgDir) + else: + cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( + binPath, self.cfgDir) else: - valgrindCmdline = "valgrind --log-file=\"valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes" + valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir - cmd = "nohup %s %s -c %s 2>&1 & " % ( - valgrindCmdline, binPath, self.cfgDir) + if platform.system().lower() == 'windows': + cmd = "mintty -h never -w hide %s %s -c %s" % ( + valgrindCmdline, binPath, self.cfgDir) + else: + cmd = "nohup %s %s -c %s 2>&1 & " % ( + valgrindCmdline, binPath, self.cfgDir) print(cmd) - if os.system(cmd) != 0: - tdLog.exit(cmd) - self.running = 1 - tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) - if self.valgrind == 0: - time.sleep(0.1) - key = 'from offline to online' - bkey = bytes(key, encoding="utf8") - logFile = self.logDir + "/taosdlog.0" - i = 0 - while not os.path.exists(logFile): - sleep(0.1) - i += 1 - if i > 50: - break - popen = subprocess.Popen( - 'tail -f ' + logFile, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True) - pid = popen.pid - # print('Popen.pid:' + str(pid)) - timeout = time.time() + 60 * 2 - while True: - line = popen.stdout.readline().strip() - if bkey in line: - popen.kill() - break - if time.time() > timeout: - tdLog.exit('wait too long for taosd start') - tdLog.debug("the dnode:%d has been started." % (self.index)) + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)\ntdDnodes.start(%d)"%(self.index, self.index)) + self.running = 1 else: - tdLog.debug( - "wait 10 seconds for the dnode:%d to start." 
% - (self.index)) - time.sleep(10) - - # time.sleep(5) + if os.system(cmd) != 0: + tdLog.exit(cmd) + self.running = 1 + print("dnode:%d is running with %s " % (self.index, cmd)) + tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) + if self.valgrind == 0: + time.sleep(0.1) + key = 'from offline to online' + bkey = bytes(key, encoding="utf8") + logFile = self.logDir + "/taosdlog.0" + i = 0 + while not os.path.exists(logFile): + sleep(0.1) + i += 1 + if i > 50: + break + tailCmdStr = 'tail -f ' + if platform.system().lower() == 'windows': + tailCmdStr = 'tail -n +0 -f ' + popen = subprocess.Popen( + tailCmdStr + logFile, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True) + pid = popen.pid + # print('Popen.pid:' + str(pid)) + timeout = time.time() + 60 * 2 + while True: + line = popen.stdout.readline().strip() + if bkey in line: + popen.kill() + break + if time.time() > timeout: + tdLog.exit('wait too long for taosd start') + tdLog.debug("the dnode:%d has been started." % (self.index)) + else: + tdLog.debug( + "wait 10 seconds for the dnode:%d to start." % + (self.index)) + time.sleep(10) def startWithoutSleep(self): binPath = self.getPath() @@ -325,19 +378,26 @@ class TDDnode: cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( binPath, self.cfgDir) else: - valgrindCmdline = "valgrind --log-file=\"valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes" + valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir cmd = "nohup %s %s -c %s 2>&1 & " % ( valgrindCmdline, binPath, self.cfgDir) print(cmd) - if os.system(cmd) != 0: - tdLog.exit(cmd) + if (self.remoteIP == ""): + if os.system(cmd) != 0: + tdLog.exit(cmd) + else: + self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)\ntdDnodes.startWithoutSleep(%d)"%(self.index, self.index)) + self.running = 1 tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) def stop(self): + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.stop(%d)"%self.index) + return if self.valgrind == 0: toBeKilled = "taosd" else: @@ -354,9 +414,10 @@ class TDDnode: time.sleep(1) processID = subprocess.check_output( psCmd, shell=True).decode("utf-8") - for port in range(6030, 6041): - fuserCmd = "fuser -k -n tcp %d" % port - os.system(fuserCmd) + if not platform.system().lower() == 'windows': + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) if self.valgrind: time.sleep(2) @@ -364,6 +425,9 @@ class TDDnode: tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index)) def forcestop(self): + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.forcestop(%d)"%self.index) + return if self.valgrind == 0: toBeKilled = "taosd" else: @@ -428,8 +492,10 @@ class TDDnodes: self.dnodes.append(TDDnode(9)) self.dnodes.append(TDDnode(10)) self.simDeployed = False + self.testCluster = False + self.valgrind = 0 - def init(self, path): + def init(self, path, remoteIP = ""): psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): @@ -449,9 +515,9 @@ class TDDnodes: psCmd, shell=True).decode("utf-8") binPath = self.dnodes[0].getPath() + "/../../../" - tdLog.debug("binPath %s" % (binPath)) + # tdLog.debug("binPath %s" 
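TDDnode.start() above treats the dnode as up once 'from offline to online' appears in taosdlog.0, tailing the log with a two-minute deadline. A standalone sketch of that readiness wait; like the original, the deadline is only checked between log lines, and the log path is passed in rather than derived from the dnode layout:

# Standalone version of the readiness wait in TDDnode.start(): tail the
# dnode log until the marker line appears or the deadline passes.
import subprocess
import time

def wait_taosd_online(log_file, timeout_s=120):
    bkey = b'from offline to online'
    popen = subprocess.Popen('tail -f ' + log_file, shell=True,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    deadline = time.time() + timeout_s
    try:
        while time.time() < deadline:
            line = popen.stdout.readline().strip()
            if bkey in line:
                return True
        return False
    finally:
        popen.kill()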
% (binPath)) binPath = os.path.realpath(binPath) - tdLog.debug("binPath real path %s" % (binPath)) + # tdLog.debug("binPath real path %s" % (binPath)) # cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath) # tdLog.debug(cmd) @@ -474,8 +540,7 @@ class TDDnodes: self.path = os.path.realpath(path) for i in range(len(self.dnodes)): - self.dnodes[i].init(self.path) - + self.dnodes[i].init(self.path, remoteIP) self.sim = TDSimClient(self.path) def setTestCluster(self, value): diff --git a/tests/pytest/util/types.py b/tests/pytest/util/types.py new file mode 100644 index 0000000000000000000000000000000000000000..218a4770269328a5ef7161cc56c0e0dc0c420f73 --- /dev/null +++ b/tests/pytest/util/types.py @@ -0,0 +1,38 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from enum import Enum + +class TDSmlProtocolType(Enum): + ''' + Schemaless Protocol types + 0 - unknown + 1 - InfluxDB Line Protocol + 2 - OpenTSDB Telnet Protocol + 3 - OpenTSDB JSON Protocol + ''' + UNKNOWN = 0 + LINE = 1 + TELNET = 2 + JSON = 3 + +class TDSmlTimestampType(Enum): + NOT_CONFIGURED = 0 + HOUR = 1 + MINUTE = 2 + SECOND = 3 + MILLI_SECOND = 4 + MICRO_SECOND = 5 + NANO_SECOND = 6 + + diff --git a/tests/pytest/wal/addOldWalTest.py b/tests/pytest/wal/addOldWalTest.py index 2f4dcd5ce807cf7bbadfa480af6ed6342058a78a..36056d1bc2d0bef786cf4a4092521867f861b93b 100644 --- a/tests/pytest/wal/addOldWalTest.py +++ b/tests/pytest/wal/addOldWalTest.py @@ -31,7 +31,7 @@ class TDTestCase: def createOldDirAndAddWal(self): oldDir = tdDnodes.getDnodesRootDir() + "dnode1/data/vnode/vnode2/wal/old" - os.system("sudo echo 'test' >> %s/wal" % oldDir) + os.system("sudo echo test >> %s/wal" % oldDir) def run(self): diff --git a/tests/script/general/alter/cached_schema_after_alter.sim b/tests/script/general/alter/cached_schema_after_alter.sim index 96ee4390845450d53508cc90c48a3148a0a827dd..043f360856e4b4f0533bf4dc5e4be7cea71c3325 100644 --- a/tests/script/general/alter/cached_schema_after_alter.sim +++ b/tests/script/general/alter/cached_schema_after_alter.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 2 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect $db = csaa_db diff --git a/tests/script/general/alter/dnode.sim b/tests/script/general/alter/dnode.sim index 7b31218fc231cfdbb79ca97573cfc6f6f149037d..64e8a17de02c956a937aa1001ac4d5873a6bed21 100644 --- a/tests/script/general/alter/dnode.sim +++ b/tests/script/general/alter/dnode.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 2 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======== step1 diff --git a/tests/script/general/alter/import.sim b/tests/script/general/alter/import.sim index aef0a258b24563e915cd8aa3dd42f6623a29170a..175e084b7f1aa73a1c8b599752fd0b7de59efda7 100644 --- a/tests/script/general/alter/import.sim +++ b/tests/script/general/alter/import.sim @@ -1,13 +1,8 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system 
sh/cfg.sh -n dnode1 -c wallevel -v 2 -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1 -system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4 print ========= start dnode1 as master system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect print ======== step1 diff --git a/tests/script/general/alter/insert1.sim b/tests/script/general/alter/insert1.sim index 12ab09beb989dd963a9e8c9c3ff5926e78d8b0ac..82781f2fe5cadf0488c5107e9e54b06364629680 100644 --- a/tests/script/general/alter/insert1.sim +++ b/tests/script/general/alter/insert1.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 2 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======== step1 diff --git a/tests/script/general/alter/insert2.sim b/tests/script/general/alter/insert2.sim index dcd9f500304f906ddddb33bd1a04c5943c232d49..a30175f3980cc117ec052ebb13a2e0b31b2cb316 100644 --- a/tests/script/general/alter/insert2.sim +++ b/tests/script/general/alter/insert2.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 2 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======== step1 diff --git a/tests/script/general/alter/metrics.sim b/tests/script/general/alter/metrics.sim index fd0b210cd1b452b2a35ebcd9f74aec98c3817b03..ec8c980c16adcf512975e54fa492d3c22b12c195 100644 --- a/tests/script/general/alter/metrics.sim +++ b/tests/script/general/alter/metrics.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 2 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======== step1 diff --git a/tests/script/general/alter/table.sim b/tests/script/general/alter/table.sim index 06704eeca6b3149b47ddc2ffb90aaab9df934bd8..cd0397760276c775d170e90831f6674880cb8f81 100644 --- a/tests/script/general/alter/table.sim +++ b/tests/script/general/alter/table.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 2 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======== step1 diff --git a/tests/script/general/alter/testSuite.sim b/tests/script/general/alter/testSuite.sim deleted file mode 100644 index cfac68144c080593499159eec81325924e7f25e6..0000000000000000000000000000000000000000 --- a/tests/script/general/alter/testSuite.sim +++ /dev/null @@ -1,7 +0,0 @@ -run general/alter/cached_schema_after_alter.sim -run general/alter/count.sim -run general/alter/import.sim -run general/alter/insert1.sim -run general/alter/insert2.sim -run general/alter/metrics.sim -run general/alter/table.sim \ No newline at end of file diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index bb96e3642bb6163be2c24ae4bc6047d8d35e1e41..217c23158dd08739caea79d5b74679d4da291968 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -29,6 +29,7 @@ ./test.sh -f tsim/insert/backquote.sim ./test.sh -f tsim/insert/null.sim ./test.sh -f tsim/insert/update0.sim +./test.sh -f tsim/insert/commit-merge0.sim # ---- parser ./test.sh -f tsim/parser/groupby-basic.sim @@ -55,6 +56,8 @@ # ---- mnode ./test.sh -f tsim/mnode/basic1.sim +./test.sh -f tsim/mnode/basic2.sim +./test.sh -f tsim/mnode/basic3.sim # ---- show ./test.sh -f tsim/show/basic.sim @@ -62,11 +65,15 @@ # ---- table ./test.sh -f tsim/table/basic1.sim -# ---- tstream -./test.sh -f tsim/tstream/basic0.sim -./test.sh -f 
tsim/tstream/basic1.sim +# ---- stream +./test.sh -f tsim/stream/basic0.sim +./test.sh -f tsim/stream/basic1.sim +./test.sh -f tsim/stream/basic2.sim +# ./test.sh -f tsim/stream/session0.sim +# ./test.sh -f tsim/stream/session1.sim # ---- transaction + ./test.sh -f tsim/trans/lossdata1.sim ./test.sh -f tsim/trans/create_db.sim # ---- tmq @@ -82,15 +89,21 @@ ./test.sh -f tsim/tmq/topic.sim # --- stable -./test.sh -f tsim/stable/alter1.sim ./test.sh -f tsim/stable/disk.sim ./test.sh -f tsim/stable/dnode3.sim ./test.sh -f tsim/stable/metrics.sim ./test.sh -f tsim/stable/refcount.sim -#./test.sh -f tsim/stable/show.sim +./test.sh -f tsim/stable/show.sim ./test.sh -f tsim/stable/values.sim ./test.sh -f tsim/stable/vnode3.sim - +./test.sh -f tsim/stable/column_add.sim +./test.sh -f tsim/stable/column_drop.sim +./test.sh -f tsim/stable/column_modify.sim +./test.sh -f tsim/stable/tag_add.sim +./test.sh -f tsim/stable/tag_drop.sim +./test.sh -f tsim/stable/tag_modify.sim +./test.sh -f tsim/stable/tag_rename.sim +./test.sh -f tsim/stable/alter_comment.sim # --- for multi process mode ./test.sh -f tsim/user/basic1.sim -m @@ -102,13 +115,19 @@ ./test.sh -f tsim/tmq/basic3.sim -m ./test.sh -f tsim/stable/vnode3.sim -m ./test.sh -f tsim/qnode/basic1.sim -m -./test.sh -f tsim/mnode/basic1.sim -m +#./test.sh -f tsim/mnode/basic1.sim -m # --- sma -./test.sh -f tsim/sma/tsmaCreateInsertData.sim +#./test.sh -f tsim/sma/tsmaCreateInsertData.sim ./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim # --- valgrind ./test.sh -f tsim/valgrind/checkError.sim -v +# --- sync +./test.sh -f tsim/sync/3Replica1VgElect.sim +./test.sh -f tsim/sync/3Replica5VgElect.sim +./test.sh -f tsim/sync/oneReplica1VgElect.sim +./test.sh -f tsim/sync/oneReplica5VgElect.sim + #======================b1-end=============== diff --git a/tests/script/sh/deploy.sh b/tests/script/sh/deploy.sh index da295f640e01cbf5cab4919aafc6cf56f1a268fc..5edc0a4d3e858d48e11eb3eea8d2fd48244b08ee 100755 --- a/tests/script/sh/deploy.sh +++ b/tests/script/sh/deploy.sh @@ -136,7 +136,7 @@ echo "qDebugFlag 143" >> $TAOS_CFG echo "rpcDebugFlag 143" >> $TAOS_CFG echo "tmrDebugFlag 131" >> $TAOS_CFG echo "uDebugFlag 143" >> $TAOS_CFG -echo "sDebugFlag 135" >> $TAOS_CFG +echo "sDebugFlag 143" >> $TAOS_CFG echo "wDebugFlag 143" >> $TAOS_CFG echo "numOfLogLines 20000000" >> $TAOS_CFG echo "statusInterval 1" >> $TAOS_CFG diff --git a/tests/script/tsim/dnode/basic1.sim b/tests/script/tsim/dnode/basic1.sim index d49dba60f3940094245c0a9f82a912d3a97155c4..d5c791e902aef3404f854287cef6224767080f82 100644 --- a/tests/script/tsim/dnode/basic1.sim +++ b/tests/script/tsim/dnode/basic1.sim @@ -7,6 +7,7 @@ sql connect print =============== show dnodes sql show dnodes; +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] if $rows != 1 then return -1 endi @@ -15,12 +16,9 @@ if $data00 != 1 then return -1 endi -# check 'vnodes' feild ? 
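The deploy.sh hunk above raises sDebugFlag from 135 to 143, which by TDengine's log-level convention (131 = error and warning, 135 = plus debug, 143 = plus trace) turns on trace logging for the sync module. A small sketch of that config-generation step, mirroring the echo ... >> $TAOS_CFG lines of the script:

DEBUG_FLAGS = {
    "qDebugFlag": 143,
    "rpcDebugFlag": 143,
    "tmrDebugFlag": 131,
    "uDebugFlag": 143,
    "sDebugFlag": 143,  # raised from 135: sync module now logs at trace level
    "wDebugFlag": 143,
}

def append_log_flags(cfg_path):
    # Append one "<flag> <value>" line per module, as deploy.sh does.
    with open(cfg_path, "a") as cfg:
        for key, value in DEBUG_FLAGS.items():
            cfg.write(f"{key} {value}\n")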
-#if $data02 != 0 then -# return -1 -#endi sql show mnodes; +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] if $rows != 1 then return -1 endi diff --git a/tests/script/tsim/insert/commit-merge0.sim b/tests/script/tsim/insert/commit-merge0.sim new file mode 100644 index 0000000000000000000000000000000000000000..adbd1904b2071ccc68f978ea1c2da40e208adc93 --- /dev/null +++ b/tests/script/tsim/insert/commit-merge0.sim @@ -0,0 +1,262 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print =============== create database +sql create database db days 300 keep 365000d,365000d,365000d +sql show databases +if $rows != 3 then + return -1 +endi + +print $data00 $data01 $data02 + +sql use db +sql create table stb1(ts timestamp, c6 double) tags (t1 int); +sql create table ct1 using stb1 tags ( 1 ); +sql create table ct2 using stb1 tags ( 2 ); +sql create table ct3 using stb1 tags ( 3 ); +sql create table ct4 using stb1 tags ( 4 ); +sql insert into ct1 values ('2022-05-01 18:30:27.001', 0.0); +sql insert into ct4 values ('2022-04-28 18:30:27.002', 0.0); +sql insert into ct1 values ('2022-05-01 18:30:17.003', 11.11); +sql insert into ct4 values ('2022-02-01 18:30:27.004', 11.11); +sql insert into ct1 values ('2022-05-01 18:30:07.005', 22.22); +sql insert into ct4 values ('2021-11-01 18:30:27.006', 22.22); +sql insert into ct1 values ('2022-05-01 18:29:27.007', 33.33); +sql insert into ct4 values ('2022-08-01 18:30:27.008', 33.33); +sql insert into ct1 values ('2022-05-01 18:20:27.009', 44.44); +sql insert into ct4 values ('2021-05-01 18:30:27.010', 44.44); +sql insert into ct1 values ('2022-05-01 18:21:27.011', 55.55); +sql insert into ct4 values ('2021-01-01 18:30:27.012', 55.55); +sql insert into ct1 values ('2022-05-01 18:22:27.013', 66.66); +sql insert into ct4 values ('2020-06-01 18:30:27.014', 66.66); +sql insert into ct1 values ('2022-05-01 18:28:37.015', 77.77); +sql insert into ct4 values ('2020-05-01 18:30:27.016', 77.77); +sql insert into ct1 values ('2022-05-01 18:29:17.017', 88.88); +sql insert into ct4 values ('2019-05-01 18:30:27.018', 88.88); +sql insert into ct1 values ('2022-05-01 18:30:20.019', 0); +sql insert into ct1 values ('2022-05-01 18:30:47.020', -99.99); +sql insert into ct1 values ('2022-05-01 18:30:49.021', NULL); +sql insert into ct1 values ('2022-05-01 18:30:51.022', -99.99); +sql insert into ct4 values ('2018-05-01 18:30:27.023', NULL) ; +sql insert into ct4 values ('2021-03-01 18:30:27.024', NULL) ; +sql insert into ct4 values ('2022-08-01 18:30:27.025', NULL) ; + +print =============== select * from stb1 - memory +sql select * from stb1; +if $rows != 25 then + print rows = $rows != 25 + return -1 +endi + + +print =============== stop and restart taosd + +$reboot_max = 10 + +$reboot_cnt = 0 + +reboot_and_check: + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode1 -s start + +$loop_cnt = 0 +check_dnode_ready: + $loop_cnt = $loop_cnt + 1 + sleep 200 + if $loop_cnt == 10 then + print ====> dnode not ready!
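commit-merge0.sim loads 25 records (13 distinct timestamps into ct1, 12 into ct4), then on every pass of the reboot loop re-inserts the identical records, forcing a merge of committed file data with fresh memtable data. The later row-count checks (13 for ct1, 12 for ct4) pin down the invariant that the merge dedupes on the timestamp primary key; sketched in Python:

def merged_row_count(file_rows, mem_rows):
    # Rows are (timestamp, value) pairs; timestamp is the primary key, so
    # re-inserting the same records after a restart must not add rows.
    return len({ts for ts, _ in file_rows} | {ts for ts, _ in mem_rows})

ct1_rows = [(i, 0.0) for i in range(13)]           # stand-in for ct1's 13 records
assert merged_row_count(ct1_rows, ct1_rows) == 13  # duplicates collapse, not append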
+ return -1 + endi +sql show dnodes +print ===> $rows $data00 $data01 $data02 $data03 $data04 $data05 +if $data00 != 1 then + return -1 +endi +if $data04 != ready then + goto check_dnode_ready +endi + +print =============== insert duplicated records to memory - loop $reboot_max - $reboot_cnt +sql use db +sql insert into ct1 values ('2022-05-01 18:30:27.001', 0.0); +sql insert into ct4 values ('2022-04-28 18:30:27.002', 0.0); +sql insert into ct1 values ('2022-05-01 18:30:17.003', 11.11); +sql insert into ct4 values ('2022-02-01 18:30:27.004', 11.11); +sql insert into ct1 values ('2022-05-01 18:30:07.005', 22.22); +sql insert into ct4 values ('2021-11-01 18:30:27.006', 22.22); +sql insert into ct1 values ('2022-05-01 18:29:27.007', 33.33); +sql insert into ct4 values ('2022-08-01 18:30:27.008', 33.33); +sql insert into ct1 values ('2022-05-01 18:20:27.009', 44.44); +sql insert into ct4 values ('2021-05-01 18:30:27.010', 44.44); +sql insert into ct1 values ('2022-05-01 18:21:27.011', 55.55); +sql insert into ct4 values ('2021-01-01 18:30:27.012', 55.55); +sql insert into ct1 values ('2022-05-01 18:22:27.013', 66.66); +sql insert into ct4 values ('2020-06-01 18:30:27.014', 66.66); +sql insert into ct1 values ('2022-05-01 18:28:37.015', 77.77); +sql insert into ct4 values ('2020-05-01 18:30:27.016', 77.77); +sql insert into ct1 values ('2022-05-01 18:29:17.017', 88.88); +sql insert into ct4 values ('2019-05-01 18:30:27.018', 88.88); +sql insert into ct1 values ('2022-05-01 18:30:20.019', 0); +sql insert into ct1 values ('2022-05-01 18:30:47.020', -99.99); +sql insert into ct1 values ('2022-05-01 18:30:49.021', NULL); +sql insert into ct1 values ('2022-05-01 18:30:51.022', -99.99); +sql insert into ct4 values ('2018-05-01 18:30:27.023', NULL) ; +sql insert into ct4 values ('2021-03-01 18:30:27.024', NULL) ; +sql insert into ct4 values ('2022-08-01 18:30:27.025', NULL) ; + +print =============== select * from ct1 - merge memory and file - loop $reboot_max - $reboot_cnt +sql select * from ct1; +if $rows != 13 then + print rows = $rows != 13 + return -1 +endi +print $data[0][0] $data[0][1] +print $data[1][0] $data[1][1] +print $data[2][0] $data[2][1] +print $data[3][0] $data[3][1] +print $data[4][0] $data[4][1] +print $data[5][0] $data[5][1] +print $data[6][0] $data[6][1] +print $data[7][0] $data[7][1] +print $data[8][0] $data[8][1] +print $data[9][0] $data[9][1] +print $data[10][0] $data[10][1] +print $data[11][0] $data[11][1] +print $data[12][0] $data[12][1] + +if $data[0][1] != 44.440000000 then + print $data[0][1] != 44.440000000 + return -1 +endi +if $data[1][1] != 55.550000000 then + print $data[1][1] != 55.550000000 + return -1 +endi +if $data[2][1] != 66.660000000 then + print $data[2][1] != 66.660000000 + return -1 +endi +if $data[3][1] != 77.770000000 then + print $data[3][1] != 77.770000000 + return -1 +endi +if $data[4][1] != 88.880000000 then + print $data[4][1] != 88.880000000 + return -1 +endi +if $data[5][1] != 33.330000000 then + print $data[5][1] != 33.330000000 + return -1 +endi +if $data[6][1] != 22.220000000 then + print $data[6][1] != 22.220000000 + return -1 +endi +if $data[7][1] != 11.110000000 then + print $data[7][1] != 11.110000000 + return -1 +endi +if $data[8][1] != 0.000000000 then + print $data[8][1] != 0.000000000 + return -1 +endi +if $data[9][1] != 0.000000000 then + print $data[9][1] != 0.000000000 + return -1 +endi +if $data[10][1] != -99.990000000 then + print $data[10][1] != -99.990000000 + return -1 +endi +if $data[11][1] != NULL then + print $data[11][1] != 
NULL + return -1 +endi +if $data[12][1] != -99.990000000 then + print $data[12][1] != -99.990000000 + return -1 +endi + +print =============== select * from ct4 - merge memory and file - loop $reboot_max - $reboot_cnt +sql select * from ct4; +if $rows != 12 then + print rows = $rows != 12 + return -1 +endi + +print $data[0][0] $data[0][1] +print $data[1][0] $data[1][1] +print $data[2][0] $data[2][1] +print $data[3][0] $data[3][1] +print $data[4][0] $data[4][1] +print $data[5][0] $data[5][1] +print $data[6][0] $data[6][1] +print $data[7][0] $data[7][1] +print $data[8][0] $data[8][1] +print $data[9][0] $data[9][1] +print $data[10][0] $data[10][1] +print $data[11][0] $data[11][1] + +if $data[0][1] != NULL then + print $data[0][1] != NULL + return -1 +endi +if $data[1][1] != 88.880000000 then + print $data[1][1] != 88.880000000 + return -1 +endi +if $data[2][1] != 77.770000000 then + print $data[2][1] != 77.770000000 + return -1 +endi +if $data[3][1] != 66.660000000 then + print $data[3][1] != 66.660000000 + return -1 +endi +if $data[4][1] != 55.550000000 then + print $data[4][1] != 55.550000000 + return -1 +endi +if $data[5][1] != NULL then + print $data[5][1] != NULL + return -1 +endi +if $data[6][1] != 44.440000000 then + print $data[6][1] != 44.440000000 + return -1 +endi +if $data[7][1] != 22.220000000 then + print $data[7][1] != 22.220000000 + return -1 +endi +if $data[8][1] != 11.110000000 then + print $data[8][1] != 11.110000000 + return -1 +endi +if $data[9][1] != 0.000000000 then + print $data[9][1] != 0.000000000 + return -1 +endi +if $data[10][1] != 33.330000000 then + print $data[10][1] != 33.330000000 + return -1 +endi +if $data[11][1] != NULL then + print $data[11][1] != NULL + return -1 +endi + + +if $reboot_cnt > $reboot_max then + print reboot_cnt $reboot_cnt > reboot_max $reboot_max + return 0 +else + print reboot_cnt $reboot_cnt <= reboot_max $reboot_max + $reboot_cnt = $reboot_cnt + 1 + goto reboot_and_check +endi diff --git a/tests/script/tsim/insert/update0.sim b/tests/script/tsim/insert/update0.sim index 89eecaf86038ab05e98fc0155c7d839d82f43088..3cb5e4008e3a57e3178721b7e3f5458ef07be52b 100644 --- a/tests/script/tsim/insert/update0.sim +++ b/tests/script/tsim/insert/update0.sim @@ -99,6 +99,23 @@ endi system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s start +$loop_cnt = 0 +check_dnode_ready: + $loop_cnt = $loop_cnt + 1 + sleep 200 + if $loop_cnt == 10 then + print ====> dnode not ready! 
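The check_dnode_ready loop that both this script and update0.sim now use replaces the old fixed sleep: poll show dnodes up to 10 times at 200 ms until column 4 reports ready. The same pattern as a Python helper, where query is an assumed callable that runs a statement and returns result rows:

import time

def wait_dnode_ready(query, dnode_id=1, retries=10, interval=0.2):
    # Poll after a restart instead of sleeping for a fixed interval.
    for _ in range(retries):
        time.sleep(interval)
        for row in query("show dnodes"):
            if row[0] == dnode_id and row[4] == "ready":
                return
    raise TimeoutError(f"dnode {dnode_id} not ready after {retries} polls")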
+ return -1 + endi +sql show dnodes +print ===> $rows $data00 $data01 $data02 $data03 $data04 $data05 +if $data00 != 1 then + return -1 +endi +if $data04 != ready then + goto check_dnode_ready +endi + print =============== step3-2 query records of ct1 from file sql select * from ct1; print $data00 $data01 diff --git a/tests/script/tsim/mnode/basic1.sim b/tests/script/tsim/mnode/basic1.sim index 235889ece6da4aa2713d5dac2cc306f52cd694cd..e922ebe37605d64d637e63aa176b53af93b06921 100644 --- a/tests/script/tsim/mnode/basic1.sim +++ b/tests/script/tsim/mnode/basic1.sim @@ -6,15 +6,6 @@ system sh/exec.sh -n dnode2 -s start sql connect print =============== show dnodes -sql show dnodes; -if $rows != 1 then - return -1 -endi - -if $data00 != 1 then - return -1 -endi - sql show mnodes; if $rows != 1 then return -1 @@ -30,88 +21,125 @@ endi print =============== create dnodes sql create dnode $hostname port 7200 -sleep 2000 +$x = 0 +step1: + $x = $x + 1 + sleep 500 + if $x == 20 then + return -1 + endi +sql show dnodes -x step1 +if $data(1)[4] != ready then + goto step1 +endi +if $data(2)[4] != ready then + goto step1 +endi + +sql_error create mnode on dnode 1 +sql_error drop mnode on dnode 1 + +print =============== create mnode 2 +sql create mnode on dnode 2 + +$x = 0 +step2: + $x = $x + 1 + sleep 1000 + if $x == 20 then + return -1 + endi +sql show mnodes +print $data(1)[0] $data(1)[1] $data(1)[2] +print $data(2)[0] $data(2)[1] $data(2)[2] -sql show dnodes; if $rows != 2 then return -1 endi - -if $data00 != 1 then +if $data(1)[0] != 1 then return -1 endi - -if $data10 != 2 then +if $data(1)[2] != LEADER then return -1 endi - -print $data02 -if $data02 != 0 then +if $data(2)[0] != 2 then return -1 endi - -if $data12 != 0 then - return -1 +if $data(2)[2] != FOLLOWER then + goto step2 endi -if $data04 != ready then +sleep 2000 +print ============ drop mnode 2 +sql drop mnode on dnode 2 +sql show mnodes +if $rows != 1 then return -1 endi +sql_error drop mnode on dnode 2 -if $data14 != ready then - return -1 -endi +$x = 0 +step2b: + $x = $x + 1 + sleep 1000 + if $x == 20 then + return -1 + endi +sql show mnodes +print $data(1)[0] $data(1)[1] $data(1)[2] +print $data(2)[0] $data(2)[1] $data(2)[2] -sql show mnodes; if $rows != 1 then return -1 endi - -if $data00 != 1 then +if $data(1)[0] != 1 then return -1 endi - -if $data02 != LEADER then +if $data(1)[2] != LEADER then return -1 endi +if $data(2)[0] != null then + goto step2b +endi +if $data(2)[2] != null then + goto step2b +endi -print =============== create drop mnode 1 -sql_error create mnode on dnode 1 -sql_error drop mnode on dnode 1 +sleep 2000 -print =============== create drop mnode 2 +print =============== create mnodes sql create mnode on dnode 2 sql show mnodes if $rows != 2 then return -1 endi -sql_error create mnode on dnode 2 -sql drop mnode on dnode 2 +$x = 0 +step3: + $x = $x + 1 + sleep 1000 + if $x == 20 then + return -1 + endi sql show mnodes -if $rows != 1 then - return -1 -endi -sql_error drop mnode on dnode 2 +print $data(1)[0] $data(1)[1] $data(1)[2] +print $data(2)[0] $data(2)[1] $data(2)[2] -print =============== create drop mnodes -sql create mnode on dnode 2 -sql show mnodes if $rows != 2 then return -1 endi - -print =============== restart -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode1 -s start -system sh/exec.sh -n dnode2 -s start - -sleep 2000 -sql show mnodes -if $rows != 2 then +if $data(1)[0] != 1 then + return -1 +endi +if $data(1)[2] != LEADER
then return -1 endi +if $data(2)[0] != 2 then + return -1 +endi +if $data(2)[2] != FOLLOWER then + goto step3 +endi system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode2 -s stop -x SIGINT diff --git a/tests/script/tsim/mnode/basic2.sim b/tests/script/tsim/mnode/basic2.sim new file mode 100644 index 0000000000000000000000000000000000000000..18aa85cf5bb00a579b8ed7be14b264845a37948a --- /dev/null +++ b/tests/script/tsim/mnode/basic2.sim @@ -0,0 +1,134 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +sql connect + +print =============== show mnodes +sql show mnodes; +if $rows != 1 then + return -1 +endi + +if $data00 != 1 then + return -1 +endi + +if $data02 != LEADER then + return -1 +endi + +print =============== create dnodes +sql create dnode $hostname port 7200 +$x = 0 +step1: + $x = $x + 1 + sleep 500 + if $x == 20 then + return -1 + endi +sql show dnodes -x step1 +if $data(1)[4] != ready then + goto step1 +endi +if $data(2)[4] != ready then + goto step1 +endi + +print =============== create mnode 2 +sql create mnode on dnode 2 + +$x = 0 +step2: + $x = $x + 1 + sleep 1000 + if $x == 20 then + return -1 + endi +sql show mnodes +print $data(1)[0] $data(1)[1] $data(1)[2] +print $data(2)[0] $data(2)[1] $data(2)[2] + +if $rows != 2 then + return -1 +endi +if $data(1)[0] != 1 then + return -1 +endi +if $data(1)[2] != LEADER then + return -1 +endi +if $data(2)[0] != 2 then + return -1 +endi +if $data(2)[2] != FOLLOWER then + goto step2 +endi + +print =============== create user +sql create user user1 PASS 'user1' +sql show users +if $rows != 2 then + return -1 +endi + +sql create database db +sql show databases +if $rows != 3 then + return -1 +endi + +sleep 5000 + +print =============== restart +system sh/exec.sh -n dnode1 -s stop +system sh/exec.sh -n dnode2 -s stop +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start + +sql connect +sql show mnodes +if $rows != 2 then + return -1 +endi + +sql show users +if $rows != 2 then + return -1 +endi + +sql show databases +if $rows != 3 then + return -1 +endi + +$x = 0 +step3: + $x = $x + 1 + sleep 500 + if $x == 20 then + return -1 + endi +sql show dnodes -x step3 +if $data(1)[4] != ready then + goto step3 +endi +if $data(2)[4] != ready then + goto step3 +endi + +print =============== insert data +sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 float, t3 binary(16)) comment "abd" +sql create table db.ctb using db.stb tags(101, 102, "103") +sql insert into db.ctb values(now, 1, "2") + +sql select * from db.ctb +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] + +if $rows != 1 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop +system sh/exec.sh -n dnode2 -s stop \ No newline at end of file diff --git a/tests/script/tsim/mnode/basic3.sim b/tests/script/tsim/mnode/basic3.sim new file mode 100644 index 0000000000000000000000000000000000000000..b0ee23cd8c15e95d26a12659d77fad0ebc0770dc --- /dev/null +++ b/tests/script/tsim/mnode/basic3.sim @@ -0,0 +1,137 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +sql connect + +print =============== step1: create dnodes +sql create dnode $hostname port 7200 +sql create
dnode $hostname port 7300 + +$x = 0 +step1: + $x = $x + 1 + sleep 1000 + if $x == 50 then + return -1 + endi +sql show dnodes -x step1 +if $data(1)[4] != ready then + goto step1 +endi +if $data(2)[4] != ready then + goto step1 +endi +if $data(3)[4] != ready then + goto step1 +endi + +print =============== step2: create mnode 2 and mnode 3 +sql create mnode on dnode 2 +sql create mnode on dnode 3 + +$x = 0 +step2: + $x = $x + 1 + sleep 1000 + if $x == 50 then + return -1 + endi +sql show mnodes -x step2 +if $data(1)[2] != LEADER then + goto step2 +endi +if $data(2)[2] != FOLLOWER then + goto step2 +endi +if $data(3)[2] != FOLLOWER then + goto step2 +endi + +print =============== step3: create user +sql create user user1 PASS 'user1' +sql show users +if $rows != 2 then + return -1 +endi + +# wait for mnode2 and mnode3 to finish receiving data +sleep 10000 + +print =============== step4: stop dnode1 +system sh/exec.sh -n dnode1 -s stop + +$x = 0 +step4: + $x = $x + 1 + sleep 1000 + if $x == 50 then + return -1 + endi +sql show mnodes -x step4 +print $data(1)[0] $data(1)[1] $data(1)[2] +print $data(2)[0] $data(2)[1] $data(2)[2] +print $data(3)[0] $data(3)[1] $data(3)[2] + +sql show users +if $rows != 2 then + return -1 +endi + +sleep 1000 +sql show dnodes +if $data(2)[4] != ready then + return -1 +endi +if $data(3)[4] != ready then + return -1 +endi + +print =============== step5: start dnode1, stop dnode2 +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s stop + +$x = 0 +step5: + $x = $x + 1 + sleep 1000 + if $x == 50 then + return -1 + endi +sql show mnodes -x step5 +print $data(1)[0] $data(1)[1] $data(1)[2] +print $data(2)[0] $data(2)[1] $data(2)[2] +print $data(3)[0] $data(3)[1] $data(3)[2] + +sql show users +if $rows != 2 then + return -1 +endi + +print =============== step6: start dnode2, stop dnode3 +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s stop + +$x = 0 +step6: + $x = $x + 1 + sleep 1000 + if $x == 50 then + return -1 + endi +sql show mnodes -x step6 +print $data(1)[0] $data(1)[1] $data(1)[2] +print $data(2)[0] $data(2)[1] $data(2)[2] +print $data(3)[0] $data(3)[1] $data(3)[2] + +sql show users +if $rows != 2 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop +system sh/exec.sh -n dnode2 -s stop +system sh/exec.sh -n dnode3 -s stop \ No newline at end of file diff --git a/tests/script/tsim/show/basic.sim b/tests/script/tsim/show/basic.sim index e171d1abb9e3249c5b93ae01899b596e6238a4c4..95201bc48e0db1b57471039c80169ccdf4e30094 100644 --- a/tests/script/tsim/show/basic.sim +++ b/tests/script/tsim/show/basic.sim @@ -25,7 +25,7 @@ sql connect # select */column from information_schema.xxxx; xxxx include: # dnodes, mnodes, modules, qnodes, -# user_databases, user_functions, user_indexes, user_stables, user_streams, +# user_databases, user_functions, user_indexes, user_stables, streams, # user_tables, user_table_distributed, user_users, vgroups, print =============== add dnode2 into cluster @@ -96,7 +96,7 @@ sql select * from information_schema.user_stables if $rows != 1 then return -1 endi -#sql select * from information_schema.user_streams +#sql select * from information_schema.`streams` sql select * from information_schema.user_tables if $rows != 28 then return -1 @@ -194,7 +194,7 @@ sql select * from information_schema.user_stables if $rows != 1 then return -1 endi -#sql select * from information_schema.user_streams +#sql select * from performance_schema.`streams` sql select * from information_schema.user_tables if $rows != 28 then return -1 diff --git
a/tests/script/tsim/sma/rsmaCreateInsertQuery.sim b/tests/script/tsim/sma/rsmaCreateInsertQuery.sim index 38ae0dc0a298d7743f3eb1466357ff0bbb621d06..5d9425e5064d3fc65038c174dae109cc6283991e 100644 --- a/tests/script/tsim/sma/rsmaCreateInsertQuery.sim +++ b/tests/script/tsim/sma/rsmaCreateInsertQuery.sim @@ -37,6 +37,15 @@ if $rows > 2 then print retention level 2 file rows $rows > 2 return -1 endi + + +if $data01 != 1 then + if $data01 != 10 then + print retention level 2 file result $data01 != 1 or 10 + return -1 + endi +endi + print =============== select * from retention level 1 from memory sql select * from ct1 where ts > now-8d; print $data00 $data01 @@ -44,15 +53,30 @@ if $rows > 2 then print retention level 1 file rows $rows > 2 return -1 endi + +if $data01 != 1 then + if $data01 != 10 then + print retention level 1 file result $data01 != 1 or 10 + return -1 + endi +endi + print =============== select * from retention level 0 from memory sql select * from ct1 where ts > now-3d; print $data00 $data01 print $data10 $data11 print $data20 $data21 + if $rows < 1 then print retention level 0 file rows $rows < 1 return -1 endi + +if $data01 != 10 then + print retention level 0 file result $data01 != 10 + return -1 +endi + #=================================================================== @@ -68,6 +92,13 @@ if $rows > 2 then return -1 endi +if $data01 != 1 then + if $data01 != 10 then + print retention level 2 file result $data01 != 1 or 10 + return -1 + endi +endi + print =============== select * from retention level 1 from file sql select * from ct1 where ts > now-8d; print $data00 $data01 @@ -76,6 +107,13 @@ if $rows > 2 then return -1 endi +if $data01 != 1 then + if $data01 != 10 then + print retention level 1 file result $data01 != 1 or 10 + return -1 + endi +endi + print =============== select * from retention level 0 from file sql select * from ct1 where ts > now-3d; print $data00 $data01 @@ -86,4 +124,9 @@ if $rows < 1 then return -1 endi +if $data01 != 10 then + print retention level 0 file result $data01 != 10 + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/sma/tsmaCreateInsertData.sim b/tests/script/tsim/sma/tsmaCreateInsertData.sim index b7a127e1b0d67f9af620919740dae87e649c82cd..07c5adef5d8114e65bb82b66e334b30c3b59ad5b 100644 --- a/tests/script/tsim/sma/tsmaCreateInsertData.sim +++ b/tests/script/tsim/sma/tsmaCreateInsertData.sim @@ -37,5 +37,12 @@ print =============== trigger stream to execute sma aggr task and insert sma dat sql insert into ct1 values(now+5s, 20, 20.0, 30.0) #=================================================================== +print =============== select * from ct1 from memory +sql select * from ct1; +print $data00 $data01 +if $rows != 5 then + print rows $rows != 5 + return -1 +endi system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stable/alter1.sim b/tests/script/tsim/stable/alter_comment.sim similarity index 99% rename from tests/script/tsim/stable/alter1.sim rename to tests/script/tsim/stable/alter_comment.sim index 1205f50f6ea144de6f5fae06ef7569a60b47e0cb..cfcbb9a1daa046c894bbfe47f4684ded5faf79a6 100644 --- a/tests/script/tsim/stable/alter1.sim +++ b/tests/script/tsim/stable/alter_comment.sim @@ -166,4 +166,5 @@ if $data[0][6] != abcde then return -1 endi +return system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/alter/count.sim b/tests/script/tsim/stable/alter_count.sim similarity index 96% rename from 
tests/script/general/alter/count.sim rename to tests/script/tsim/stable/alter_count.sim index fc936668b8ea08f9cd08874ad98668a4d8904315..e5af9a5735e6f7f9844d055be8d4c2892d6b2ed7 100644 --- a/tests/script/general/alter/count.sim +++ b/tests/script/tsim/stable/alter_count.sim @@ -1,13 +1,8 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 2 -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1 -system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4 print ========= start dnode1 as master system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect print ======== step1 @@ -141,10 +136,13 @@ endi print ============= step10 system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sql connect + +sql select count(a), count(b), count(c), count(d), count(e), count(f), count(g), count(h) from d1.tb; +sql select count(a), count(b), count(c), count(d), count(e), count(f), count(g), count(h) from d1.tb; +sql use d1 sql select count(a), count(b), count(c), count(d), count(e), count(f), count(g), count(h) from tb if $data00 != 24 then return -1 diff --git a/tests/script/tsim/stable/column_add.sim b/tests/script/tsim/stable/column_add.sim new file mode 100644 index 0000000000000000000000000000000000000000..a5d9b48508baa78e7266a9af7d1473b192643041 --- /dev/null +++ b/tests/script/tsim/stable/column_add.sim @@ -0,0 +1,303 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print ========== prepare stb and ctb +sql create database db vgroups 1 +sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 float, t3 binary(16)) comment "abd" +sql create table db.ctb using db.stb tags(101, 102, "103") +sql insert into db.ctb values(now, 1, "2") + +sql show db.stables +if $rows != 1 then + return -1 +endi +if $data[0][0] != stb then + return -1 +endi +if $data[0][1] != db then + return -1 +endi +if $data[0][3] != 3 then + return -1 +endi +if $data[0][4] != 3 then + return -1 +endi +if $data[0][6] != abd then + return -1 +endi + +sql show db.tables +if $rows != 1 then + return -1 +endi +if $data[0][0] != ctb then + return -1 +endi +if $data[0][1] != db then + return -1 +endi +if $data[0][3] != 3 then + return -1 +endi +if $data[0][4] != stb then + return -1 +endi +if $data[0][6] != 2 then + return -1 +endi +if $data[0][9] != CHILD_TABLE then + return -1 +endi + +sql select * from db.stb +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi + +sql_error alter table db.stb add column ts int +sql_error alter table db.stb add column t1 int +sql_error alter table db.stb add column t2 int +sql_error alter table db.stb add column t3 int +sql_error alter table db.stb add column c1 int + +print ========== step1 add column c3 +sql alter table db.stb add column c3 int +sql show db.stables +if $data[0][3] != 4 then + return -1 +endi + +sql show db.tables +if $data[0][3] != 4 then + return -1 +endi + +sql select * from db.stb +sql select * from db.stb +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != NULL then + return -1 +endi +if $data[0][4] != 101 then + return -1 +endi + +sql insert into db.ctb values(now+1s, 1, 2, 3) +sql select * from db.stb 
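Every ADD COLUMN step in column_add.sim checks the same invariant: rows written before the ALTER read back NULL in the new slot, rows written after carry real values, and the tag columns shift one position right in the star-select output. Step 1 condensed into Python, with query an assumed SQL helper returning result rows:

def check_add_column_step(query):
    query("alter table db.stb add column c3 int")
    query("insert into db.ctb values(now+1s, 1, '2', 3)")
    rows = query("select * from db.stb order by ts")
    assert rows[0][3] is None  # pre-ALTER row: c3 reads as NULL
    assert rows[1][3] == 3     # post-ALTER row: c3 holds the inserted value
    assert rows[0][4] == 101   # tag t1 shifted from slot 3 to slot 4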
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] + +if $rows != 2 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != NULL then + return -1 +endi +if $data[0][4] != 101 then + return -1 +endi +if $data[1][1] != 1 then + return -1 +endi +if $data[1][2] != 2 then + return -1 +endi +if $data[1][3] != 3 then + return -1 +endi +if $data[1][4] != 101 then + return -1 +endi + +print ========== step2 add column c4 +sql alter table db.stb add column c4 bigint +sql select * from db.stb +sql insert into db.ctb values(now+2s, 1, 2, 3, 4) +sql select * from db.stb +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] + +if $rows != 3 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != NULL then + return -1 +endi +if $data[0][4] != NULL then + return -1 +endi +if $data[0][5] != 101 then + return -1 +endi +if $data[1][1] != 1 then + return -1 +endi +if $data[1][2] != 2 then + return -1 +endi +if $data[1][3] != 3 then + return -1 +endi +if $data[1][4] != NULL then + return -1 +endi +if $data[1][5] != 101 then + return -1 +endi +if $data[2][1] != 1 then + return -1 +endi +if $data[2][2] != 2 then + return -1 +endi +if $data[2][3] != 3 then + return -1 +endi +if $data[2][4] != 4 then + return -1 +endi +if $data[2][5] != 101 then + return -1 +endi + +print ========== step3 add column c5 +sql alter table db.stb add column c5 int +sql insert into db.ctb values(now+3s, 1, 2, 3, 4, 5) +sql select * from db.stb +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] + +if $rows != 4 then + return -1 +endi +if $data[2][1] != 1 then + return -1 +endi +if $data[2][2] != 2 then + return -1 +endi +if $data[2][3] != 3 then + return -1 +endi +if $data[2][4] != 4 then + return -1 +endi +if $data[2][5] != NULL then + return -1 +endi +if $data[2][6] != 101 then + return -1 +endi +if $data[3][1] != 1 then + return -1 +endi +if $data[3][2] != 2 then + return -1 +endi +if $data[3][3] != 3 then + return -1 +endi +if $data[3][4] != 4 then + return -1 +endi +if $data[3][5] != 5 then + return -1 +endi +if $data[3][6] != 101 then + return -1 +endi + +print ========== step4 add column c6 +sql alter table db.stb add column c6 int +sql insert into db.ctb values(now+4s, 1, 2, 3, 4, 5, 6) +sql select * from db.stb + +if $rows != 5 then + return -1 +endi +if $data[3][1] != 1 then + return -1 +endi +if $data[3][2] != 2 then + return -1 +endi +if $data[3][3] != 3 then + return -1 +endi +if $data[3][4] != 4 then + return -1 +endi +if $data[3][5] != 5 then + return -1 +endi +if $data[3][6] != NULL then + return -1 +endi +if $data[3][7] != 101 then + return -1 +endi +if $data[4][1] != 1 then + return -1 +endi +if $data[4][2] != 2 then + return -1 +endi +if $data[4][3] != 3 then + return -1 +endi +if $data[4][4] != 4 then + return -1 +endi 
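The step5 describe check that closes this script is simple arithmetic: describe lists the timestamp, then every data column, then every tag, so after adding c3 through c6 the child table reports 1 + 6 + 3 = 10 rows:

def expected_describe_rows(n_columns, n_tags):
    # ts + data columns + tag columns: the layout `describe` reports
    return 1 + n_columns + n_tags

assert expected_describe_rows(6, 3) == 10  # column_add.sim step5
assert expected_describe_rows(4, 3) == 8   # column_drop.sim step5, after the drops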
+if $data[4][5] != 5 then + return -1 +endi +if $data[4][6] != 6 then + return -1 +endi +if $data[4][7] != 101 then + return -1 +endi + +print ========== step5 describe +sql describe db.ctb +if $rows != 10 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/stable/column_drop.sim b/tests/script/tsim/stable/column_drop.sim new file mode 100644 index 0000000000000000000000000000000000000000..af84a3ecac28da9f6dbf41d08af707d1aa6226a4 --- /dev/null +++ b/tests/script/tsim/stable/column_drop.sim @@ -0,0 +1,209 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print ========== prepare stb and ctb +sql create database db vgroups 1 +sql create table db.stb (ts timestamp, c1 int, c2 binary(4), c3 int, c4 bigint, c5 int, c6 int) tags(t1 int, t2 float, t3 binary(16)) comment "abd" +sql create table db.ctb using db.stb tags(101, 102, "103") +sql insert into db.ctb values(now, 1, "2", 3, 4, 5, 6) + +sql show db.stables +if $rows != 1 then + return -1 +endi +if $data[0][0] != stb then + return -1 +endi +if $data[0][1] != db then + return -1 +endi +if $data[0][3] != 7 then + return -1 +endi +if $data[0][4] != 3 then + return -1 +endi +if $data[0][6] != abd then + return -1 +endi + +sql show db.tables +if $rows != 1 then + return -1 +endi +if $data[0][0] != ctb then + return -1 +endi +if $data[0][1] != db then + return -1 +endi +if $data[0][3] != 7 then + return -1 +endi +if $data[0][4] != stb then + return -1 +endi +if $data[0][6] != 2 then + return -1 +endi +if $data[0][9] != CHILD_TABLE then + return -1 +endi + +sql select * from db.stb +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 3 then + return -1 +endi +if $data[0][4] != 4 then + return -1 +endi +if $data[0][5] != 5 then + return -1 +endi +if $data[0][6] != 6 then + return -1 +endi +if $data[0][7] != 101 then + return -1 +endi + +sql_error alter table db.stb drop column ts +sql_error alter table db.stb drop column t1 +sql_error alter table db.stb drop column t2 +sql_error alter table db.stb drop column t3 +sql_error alter table db.stb drop column c9 + +print ========== step1 drop column c6 +sql alter table db.stb drop column c6 +sql show db.stables +if $data[0][3] != 6 then + return -1 +endi + +sql show db.tables +if $data[0][3] != 6 then + return -1 +endi + +sql select * from db.stb +sql select * from db.stb +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 3 then + return -1 +endi +if $data[0][4] != 4 then + return -1 +endi +if $data[0][5] != 5 then + return -1 +endi +if $data[0][6] != 101 then + return -1 +endi + +sql insert into db.ctb values(now+1s, 1, 2, 3, 4, 5) +sql select * from db.stb +if $rows != 2 then + return -1 +endi + +print ========== step2 drop column c5 +sql alter table db.stb drop column c5 +sql insert into db.ctb values(now+2s, 1, 2, 3, 4, 5) +sql insert into db.ctb values(now+3s, 1, 2, 3, 4) +sql_error insert into db.ctb values(now+2s, 1, 2, 3, 4, 5) + +sql select * from db.stb +if $rows != 4 then + return -1 +endi + +print ========== step3 drop column c4 +sql alter table db.stb drop column c4 +sql select * from db.stb +sql_error insert into db.ctb values(now+2s, 1, 2, 3, 4, 5) +sql_error insert into 
db.ctb values(now+2s, 1, 2, 3, 4) +sql insert into db.ctb values(now+3s, 1, 2, 3) + +sql select * from db.stb +if $rows != 5 then + return -1 +endi + +print ========== step4 add column c4 +sql alter table db.stb add column c4 binary(13) +sql insert into db.ctb values(now+4s, 1, 2, 3, '4') +sql select * from db.stb +if $rows != 6 then + return -1 +endi +if $data[1][4] != NULL then + return -1 +endi +if $data[2][4] != NULL then + return -1 +endi +if $data[3][4] != NULL then + return -1 +endi +if $data[5][4] != 4 then + return -1 +endi + +print ========== step5 describe +sql describe db.ctb +if $rows != 8 then + return -1 +endi +if $data[0][0] != ts then + return -1 +endi +if $data[1][0] != c1 then + return -1 +endi +if $data[2][0] != c2 then + return -1 +endi +if $data[3][0] != c3 then + return -1 +endi +if $data[4][0] != c4 then + return -1 +endi +if $data[4][1] != VARCHAR then + return -1 +endi +if $data[4][2] != 13 then + return -1 +endi +if $data[5][0] != t1 then + return -1 +endi +if $data[6][0] != t2 then + return -1 +endi +if $data[7][0] != t3 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/stable/column_modify.sim b/tests/script/tsim/stable/column_modify.sim new file mode 100644 index 0000000000000000000000000000000000000000..16e7ff8f67f1d9818947c54f6929728b086f44ab --- /dev/null +++ b/tests/script/tsim/stable/column_modify.sim @@ -0,0 +1,106 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print ========== prepare stb and ctb +sql create database db vgroups 1 +sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 float, t3 binary(16)) comment "abd" +sql create table db.ctb using db.stb tags(101, 102, "103") +sql insert into db.ctb values(now, 1, "1234") + +sql_error alter table db.stb MODIFY column c2 binary(3) +sql_error alter table db.stb MODIFY column c2 int +sql_error alter table db.stb MODIFY column c1 int +sql_error alter table db.stb MODIFY column ts int +sql_error insert into db.ctb values(now, 1, "12345") + +print ========== step1 modify column +sql alter table db.stb MODIFY column c2 binary(5) +sql insert into db.ctb values(now, 1, "12345") + +sql select * from db.stb +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] + +if $rows != 2 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 1234 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[1][1] != 1 then + return -1 +endi +if $data[1][2] != 12345 then + return -1 +endi +if $data[1][3] != 101 then + return -1 +endi + +print ========== step2 describe +sql describe db.ctb +if $rows != 6 then + return -1 +endi +if $data[0][0] != ts then + return -1 +endi +if $data[1][0] != c1 then + return -1 +endi +if $data[2][0] != c2 then + return -1 +endi +if $data[2][1] != VARCHAR then + return -1 +endi +if $data[2][2] != 5 then + return -1 +endi +if $data[3][0] != t1 then + return -1 +endi +if $data[4][0] != t2 then + return -1 +endi +if $data[5][0] != t3 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode1 -s start + +sql connect +sql select * from db.ctb + +if $rows != 2 then + return -1 +endi +#if $data[0][1] != 1 then +# return -1 +#endi +#if $data[0][2] != 1234 then +# return -1 +#endi +#if $data[0][3] != 101 then +# 
return -1 +#endi +#if $data[1][1] != 1 then +# return -1 +#endi +#if $data[1][2] != 12345 then +# return -1 +#endi +#if $data[1][3] != 101 then +# return -1 +#endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/stable/disk.sim b/tests/script/tsim/stable/disk.sim index c1ced6ae1076b3b1cc5e8a79f31188c076a93f59..eeaa8293a505a7af3b774eb2e0d3b7fab5b6fe49 100644 --- a/tests/script/tsim/stable/disk.sim +++ b/tests/script/tsim/stable/disk.sim @@ -1,17 +1,9 @@ system sh/stop_dnodes.sh - - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======================== dnode1 start - $dbPrefix = d_db $tbPrefix = d_tb $mtPrefix = d_mt @@ -59,9 +51,8 @@ endi sleep 1000 system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 +sleep 1000 system sh/exec.sh -n dnode1 -s start -sleep 6000 sql use $db sql show vgroups diff --git a/tests/script/tsim/stable/dnode3.sim b/tests/script/tsim/stable/dnode3.sim index 706c4aa499ce3cebaedcbb71c24a9473a9069c9a..03e8df26b7543e61f0e8e52a1fd5bd8ab9de5e0f 100644 --- a/tests/script/tsim/stable/dnode3.sim +++ b/tests/script/tsim/stable/dnode3.sim @@ -1,19 +1,9 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode2 -i 2 system sh/deploy.sh -n dnode3 -i 3 system sh/deploy.sh -n dnode4 -i 4 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode3 -c walLevel -v 1 -system sh/cfg.sh -n dnode4 -c walLevel -v 1 -# system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -# system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -# system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 -# system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 system sh/exec.sh -n dnode1 -s start - sql connect sql create dnode $hostname PORT 7200 diff --git a/tests/script/tsim/stable/metrics.sim b/tests/script/tsim/stable/metrics.sim index e68d95511cfd3c4ea556e34ffed5111f05064405..26323b4a92539ed62fdd060cc7e73dfafec70101 100644 --- a/tests/script/tsim/stable/metrics.sim +++ b/tests/script/tsim/stable/metrics.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 system sh/exec.sh -n dnode1 -s start - -sleep 1000 sql connect $dbPrefix = m_me_db diff --git a/tests/script/tsim/stable/refcount.sim b/tests/script/tsim/stable/refcount.sim index fffa6f75a4adfe2b52b1a7d1b587f6bf7a182ba4..d77c8e08900c1b0eeeee95bbfc4c6a4540558e6b 100644 --- a/tests/script/tsim/stable/refcount.sim +++ b/tests/script/tsim/stable/refcount.sim @@ -1,11 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print =============== step1 diff --git a/tests/script/tsim/stable/show.sim b/tests/script/tsim/stable/show.sim index 823aefe9d86954dc8a3af85359ec02a475182aae..d3ab75adf5ac08dbd4c2a8a0870cfe4fbfd62a4d 100644 --- a/tests/script/tsim/stable/show.sim +++ b/tests/script/tsim/stable/show.sim @@ -1,14 +1,9 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print 
======================== create stable - sql create database d1 sql use d1 diff --git a/tests/script/tsim/stable/add_column.sim b/tests/script/tsim/stable/tag_add.sim similarity index 55% rename from tests/script/tsim/stable/add_column.sim rename to tests/script/tsim/stable/tag_add.sim index 0b2df509f9f4c32f60fd073076517911d1f84f3e..01cc7bc36c9f9dc5f69198f4f0282b0f15531fe8 100644 --- a/tests/script/tsim/stable/add_column.sim +++ b/tests/script/tsim/stable/tag_add.sim @@ -5,8 +5,8 @@ sql connect print ========== prepare stb and ctb sql create database db vgroups 1 -sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 float, t3 binary(16)) comment "abd" -sql create table db.ctb using db.stb tags(101, 102, "103") +sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(16)) comment "abd" +sql create table db.ctb using db.stb tags(101, "102") sql insert into db.ctb values(now, 1, "2") sql show db.stables @@ -22,7 +22,7 @@ endi if $data[0][3] != 3 then return -1 endi -if $data[0][4] != 3 then +if $data[0][4] != 2 then return -1 endi if $data[0][6] != abd then @@ -65,20 +65,43 @@ endi if $data[0][3] != 101 then return -1 endi +if $data[0][4] != 102 then + return -1 +endi + +sql_error alter table db.stb add tag ts int +sql_error alter table db.stb add tag t1 int +sql_error alter table db.stb add tag t2 int +sql_error alter table db.stb add tag c1 int +sql_error alter table db.stb add tag c2 int + +print ========== step1 add tag t3 +sql alter table db.stb add tag t3 int -print ========== add column c3 -sql alter table db.stb add column c3 int sql show db.stables -if $data[0][3] != 4 then +if $data[0][3] != 3 then return -1 endi sql show db.tables -if $data[0][3] != 4 then +if $data[0][3] != 3 then + return -1 +endi + +sql describe db.ctb +if $rows != 6 then + return -1 +endi +if $data[5][0] != t3 then + return -1 +endi +if $data[5][1] != INT then + return -1 +endi +if $data[5][2] != 4 then return -1 endi -sql select * from db.stb sql select * from db.stb print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] if $rows != 1 then @@ -90,19 +113,25 @@ endi if $data[0][2] != 2 then return -1 endi -if $data[0][3] != NULL then +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != 102 then return -1 endi -if $data[0][4] != 101 then +if $data[0][5] != NULL then return -1 endi -sql insert into db.ctb values(now+1s, 1, 2, 3) +print ========== step2 add tag t4 +sql alter table db.stb add tag t4 bigint +sql select * from db.stb sql select * from db.stb print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] -if $rows != 2 then +if $rows != 1 then return -1 endi if $data[0][1] != 1 then @@ -111,31 +140,54 @@ endi if $data[0][2] != 2 then return -1 endi -if $data[0][3] != NULL then +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != 102 then return -1 endi -if $data[0][4] != 101 then +if $data[0][5] != NULL then return -1 endi -if $data[1][1] != 1 then +if $data[0][6] != NULL then return -1 endi -if $data[2][2] != 2 then + +sql_error create table db.ctb2 using db.stb tags(101, "102") +sql create table db.ctb2 using db.stb tags(101, "102", 103, 104) +sql insert into db.ctb2 values(now, 1, "2") + +sql select * from db.stb where tbname = 'ctb2'; +print $data[0][0] $data[0][1] $data[0][2] 
$data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] + +if $rows != 1 then return -1 endi -if $data[1][3] != 3 then +if $data[0][1] != 1 then return -1 endi -if $data[1][4] != 101 then +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != 102 then + return -1 +endi +if $data[0][5] != 103 then + return -1 +endi +if $data[0][6] != 104 then return -1 endi -print ========== add column c4 -sql alter table db.stb add column c4 bigint -sql insert into db.ctb values(now+2s, 1, 2, 3, 4) -sql select * from db.stb -sql select * from db.stb -print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] -print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] -print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print ========== step3 describe +sql describe db.ctb +if $rows != 7 then + return -1 +endi +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/stable/tag_drop.sim b/tests/script/tsim/stable/tag_drop.sim new file mode 100644 index 0000000000000000000000000000000000000000..afac59daff9b8d3d2713517f9bf7523e2c612b6c --- /dev/null +++ b/tests/script/tsim/stable/tag_drop.sim @@ -0,0 +1,337 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print ========== prepare stb and ctb +sql create database db vgroups 1 +sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(16)) comment "abd" +sql create table db.ctb using db.stb tags(101, "102") +sql insert into db.ctb values(now, 1, "2") + +sql show db.stables +if $rows != 1 then + return -1 +endi +if $data[0][0] != stb then + return -1 +endi +if $data[0][1] != db then + return -1 +endi +if $data[0][3] != 3 then + return -1 +endi +if $data[0][4] != 2 then + return -1 +endi +if $data[0][6] != abd then + return -1 +endi + +sql show db.tables +if $rows != 1 then + return -1 +endi +if $data[0][0] != ctb then + return -1 +endi +if $data[0][1] != db then + return -1 +endi +if $data[0][3] != 3 then + return -1 +endi +if $data[0][4] != stb then + return -1 +endi +if $data[0][6] != 2 then + return -1 +endi +if $data[0][9] != CHILD_TABLE then + return -1 +endi + +sql select * from db.stb +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != 102 then + return -1 +endi + +sql_error alter table db.stb drop tag ts int +sql_error alter table db.stb drop tag t3 int +sql_error alter table db.stb drop tag t4 int +sql_error alter table db.stb drop tag c1 int +sql_error alter table db.stb drop tag c2 int + +print ========== step1 drop tag t2 +sql alter table db.stb drop tag t2 + +sql show db.stables +if $data[0][4] != 1 then + return -1 +endi + +sql describe db.ctb +if $rows != 4 then + return -1 +endi +if $data[4][0] != null then + return -1 +endi + +sql select * from db.stb +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != null then + return 
-1 +endi + +print ========== step2 add tag t3 +sql alter table db.stb add tag t3 int + +sql show db.stables +if $data[0][4] != 2 then + return -1 +endi + +sql describe db.ctb +if $rows != 5 then + return -1 +endi +if $data[4][0] != t3 then + return -1 +endi +if $data[4][1] != INT then + return -1 +endi +if $data[4][2] != 4 then + return -1 +endi + +sql select * from db.stb +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != NULL then + return -1 +endi + +print ========== step3 add tag t4 +sql alter table db.stb add tag t4 bigint +sql select * from db.stb +sql select * from db.stb +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] + +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != NULL then + return -1 +endi +if $data[0][5] != NULL then + return -1 +endi +if $data[0][6] != null then + return -1 +endi + +sql_error create table db.ctb2 using db.stb tags(101, "102") +sql create table db.ctb2 using db.stb tags(201, 202, 203) +sql insert into db.ctb2 values(now, 1, "2") + +sql select * from db.stb where tbname = 'ctb2'; +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] + +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 201 then + return -1 +endi +if $data[0][4] != 202 then + return -1 +endi +if $data[0][5] != 203 then + return -1 +endi + +print ========== step4 describe +sql describe db.ctb +if $rows != 6 then + return -1 +endi + +print ========== step5 add tag2 +sql alter table db.stb add tag t2 bigint +sql select * from db.stb where tbname = 'ctb2'; +sql select * from db.stb where tbname = 'ctb2'; +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] + +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 201 then + return -1 +endi +if $data[0][4] != 202 then + return -1 +endi +if $data[0][5] != 203 then + return -1 +endi +if $data[0][6] != NULL then + return -1 +endi + +sql_error create table db.ctb2 using db.stb tags(101, "102") +sql_error create table db.ctb2 using db.stb tags(201, 202, 203) +sql create table db.ctb3 using db.stb tags(301, 302, 303, 304) +sql insert into db.ctb3 values(now, 1, "2") + +sql select * from db.stb where tbname = 'ctb3'; +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] 
$data[1][4] $data[1][5] $data[1][6] + +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 301 then + return -1 +endi +if $data[0][4] != 302 then + return -1 +endi +if $data[0][5] != 303 then + return -1 +endi +if $data[0][6] != 304 then + return -1 +endi + +print ========== step6 describe +sql describe db.ctb +if $rows != 7 then + return -1 +endi + +if $data[3][0] != t1 then + return -1 +endi +if $data[4][0] != t3 then + return -1 +endi +if $data[5][0] != t4 then + return -1 +endi +if $data[6][0] != t2 then + return -1 +endi +if $data[6][1] != BIGINT then + return -1 +endi + +print ========== step7 drop tag t1 +sql alter table db.stb drop tag t1 + +sql show db.stables +if $data[0][4] != 3 then + return -1 +endi + +sql describe db.ctb +if $rows != 6 then + return -1 +endi + +sql select * from db.stb where tbname = 'ctb3'; +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] + +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 302 then + return -1 +endi +if $data[0][4] != 303 then + return -1 +endi +if $data[0][5] != 304 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/stable/tag_modify.sim b/tests/script/tsim/stable/tag_modify.sim new file mode 100644 index 0000000000000000000000000000000000000000..62e4c7b28255ee085250cb4fc43612116fc50be0 --- /dev/null +++ b/tests/script/tsim/stable/tag_modify.sim @@ -0,0 +1,123 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print ========== prepare stb and ctb +sql create database db vgroups 1 +sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(4)) comment "abd" + +sql_error alter table db.stb MODIFY tag c2 binary(3) +sql_error alter table db.stb MODIFY tag c2 int +sql_error alter table db.stb MODIFY tag c1 int +sql_error alter table db.stb MODIFY tag ts int +sql_error alter table db.stb MODIFY tag t2 binary(3) +sql_error alter table db.stb MODIFY tag t2 int +sql_error alter table db.stb MODIFY tag t1 int +sql create table db.ctb using db.stb tags(101, "12345") +sql insert into db.ctb values(now, 1, "1234") + +sql select * from db.stb +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] + +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 1234 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != 1234 then + return -1 +endi + +print ========== step1 modify tag +sql alter table db.stb MODIFY tag t2 binary(5) +sql select * from db.stb + +sql create table db.ctb2 using db.stb tags(101, "12345") +sql insert into db.ctb2 values(now, 1, "1234") + +sql select * from db.stb where tbname = 'ctb2'; +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] + +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 1234 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if 
$data[0][4] != 12345 then + return -1 +endi + +print ========== step2 describe +sql describe db.ctb2 +if $rows != 5 then + return -1 +endi +if $data[0][0] != ts then + return -1 +endi +if $data[1][0] != c1 then + return -1 +endi +if $data[2][0] != c2 then + return -1 +endi +if $data[3][0] != t1 then + return -1 +endi +if $data[4][0] != t2 then + return -1 +endi +if $data[4][1] != VARCHAR then + return -1 +endi +if $data[4][2] != 5 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode1 -s start + +sql connect +sql describe db.ctb2 +if $rows != 5 then + return -1 +endi +if $data[0][0] != ts then + return -1 +endi +if $data[1][0] != c1 then + return -1 +endi +if $data[2][0] != c2 then + return -1 +endi +if $data[3][0] != t1 then + return -1 +endi +if $data[4][0] != t2 then + return -1 +endi +if $data[4][1] != VARCHAR then + return -1 +endi +if $data[4][2] != 5 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/stable/tag_rename.sim b/tests/script/tsim/stable/tag_rename.sim new file mode 100644 index 0000000000000000000000000000000000000000..2f67a3ab2c51d8c8499219ea8779b23797d9d0af --- /dev/null +++ b/tests/script/tsim/stable/tag_rename.sim @@ -0,0 +1,120 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print ========== prepare stb and ctb +sql create database db vgroups 1 +sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(4)) comment "abd" + +sql_error alter table db.stb rename tag c2 c3 +sql_error alter table db.stb rename tag c2 c3 +sql_error alter table db.stb rename tag c1 c3 +sql_error alter table db.stb rename tag ts c3 +sql_error alter table db.stb rename tag t2 t1 +sql_error alter table db.stb rename tag t2 t2 +sql_error alter table db.stb rename tag t1 t2 +sql create table db.ctb using db.stb tags(101, "12345") +sql insert into db.ctb values(now, 1, "1234") + +sql select * from db.stb +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] + +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 1234 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != 1234 then + return -1 +endi + +print ========== step1 rename tag +sql alter table db.stb rename tag t1 t3 +sql select * from db.stb +sql select * from db.stb + +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] + +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 1234 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != 1234 then + return -1 +endi + +print ========== step2 describe +sql describe db.ctb +if $rows != 5 then + return -1 +endi +if $data[0][0] != ts then + return -1 +endi +if $data[1][0] != c1 then + return -1 +endi +if $data[2][0] != c2 then + return -1 +endi +if $data[3][0] != t3 then + return -1 +endi +if $data[4][0] != t2 then + return -1 +endi +if $data[4][1] != VARCHAR then + return -1 +endi +if $data[4][2] != 4 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode1 -s start + +sql connect +sql describe db.ctb +if $rows != 5 then + return -1 +endi +if $data[0][0] != ts then + return -1 +endi +if $data[1][0] != c1 then + 
return -1 +endi +if $data[2][0] != c2 then + return -1 +endi +if $data[3][0] != t3 then + return -1 +endi +if $data[4][0] != t2 then + return -1 +endi +if $data[4][1] != VARCHAR then + return -1 +endi +if $data[4][2] != 4 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/stable/values.sim b/tests/script/tsim/stable/values.sim index e5e3118e12634f41b0d124d3ba379b8f93df442f..88eca28a12c6a48c5c39178f194e8836864e71d8 100644 --- a/tests/script/tsim/stable/values.sim +++ b/tests/script/tsim/stable/values.sim @@ -1,16 +1,9 @@ system sh/stop_dnodes.sh - - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======================== dnode1 start - sql create database vdb0 sql create table vdb0.mt (ts timestamp, tbcol int) TAGS(tgcol int) diff --git a/tests/script/tsim/stable/vnode3.sim b/tests/script/tsim/stable/vnode3.sim index 97a8203883cc5f427ccc355cf5898b1e3ebe6cd2..186d0f5eea254aeb451f48c3cbf7d0d094723c09 100644 --- a/tests/script/tsim/stable/vnode3.sim +++ b/tests/script/tsim/stable/vnode3.sim @@ -1,16 +1,9 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======================== dnode1 start - $dbPrefix = v3_db $tbPrefix = v3_tb $mtPrefix = v3_mt diff --git a/tests/script/tsim/tstream/basic0.sim b/tests/script/tsim/stream/basic0.sim similarity index 97% rename from tests/script/tsim/tstream/basic0.sim rename to tests/script/tsim/stream/basic0.sim index 9edad991dc0ac5c5c960be026c1fd17073d17881..29775a5ef1d1daf90122f053da6c153bac843341 100644 --- a/tests/script/tsim/tstream/basic0.sim +++ b/tests/script/tsim/stream/basic0.sim @@ -63,7 +63,8 @@ if $data02 != 234 then return -1 endi -if $data03 != 234 then +if $data03 != 234 then + print expect 234, actual $data03 return -1 endi diff --git a/tests/script/tsim/tstream/basic1.sim b/tests/script/tsim/stream/basic1.sim similarity index 100% rename from tests/script/tsim/tstream/basic1.sim rename to tests/script/tsim/stream/basic1.sim diff --git a/tests/script/tsim/stream/basic2.sim b/tests/script/tsim/stream/basic2.sim new file mode 100644 index 0000000000000000000000000000000000000000..247d8f62eeae7cda38829db5c2c4fd6b2843c787 --- /dev/null +++ b/tests/script/tsim/stream/basic2.sim @@ -0,0 +1,112 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print =============== create database +sql create database d0 vgroups 1 +sql show databases +if $rows != 3 then + return -1 +endi + +print $data00 $data01 $data02 + +sql use d0 + +print =============== create super table, include column type for count/sum/min/max/first +sql create table if not exists stb (ts timestamp, k int) tags (a int) + +sql show stables +if $rows != 1 then + return -1 +endi + +print =============== create child table +sql create table ct1 using stb tags(1000) +sql create table ct2 using stb tags(2000) +sql create table ct3 using stb tags(3000) + +sql show tables +if $rows != 3 then + return -1 +endi + +sql create stream s1 trigger at_once into outstb as select _wstartts, min(k), max(k), sum(k) as sum_alias from ct1 interval(10m) + +sql show stables 
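+# Editor's note (sketch): the check below assumes that the stream target
+# table outstb, created by the "create stream ... into outstb" statement
+# above, is itself listed by "show stables", so the expected count grows
+# from 1 (stb) to 2 (stb plus outstb).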
+if $rows != 2 then
+  return -1
+endi
+
+print =============== insert data
+
+sql insert into ct1 values('2022-05-08 03:42:00.000', 234)
+sleep 100
+
+#===================================================================
+print =============== query data from child table
+
+sql select `_wstartts`,`min(k)`,`max(k)`,sum_alias from outstb
+print rows: $rows
+print $data00 $data01 $data02 $data03
+if $rows != 1 then
+  return -1
+endi
+
+if $data01 != 234 then
+  return -1
+endi
+
+if $data02 != 234 then
+  return -1
+endi
+
+if $data03 != 234 then
+  return -1
+endi
+
+#===================================================================
+print =============== insert data
+
+sql insert into ct1 values('2022-05-08 03:57:00.000', -111)
+sleep 100
+
+
+#===================================================================
+print =============== query data from child table
+
+sql select `_wstartts`,`min(k)`,`max(k)`,sum_alias from outstb
+print rows: $rows
+print $data00 $data01 $data02 $data03
+print $data10 $data11 $data12 $data13
+if $rows != 2 then
+  return -1
+endi
+
+if $data01 != 234 then
+  return -1
+endi
+
+if $data02 != 234 then
+  return -1
+endi
+
+if $data03 != 234 then
+  return -1
+endi
+
+if $data11 != -111 then
+  return -1
+endi
+
+if $data12 != -111 then
+  return -1
+endi
+
+if $data13 != -111 then
+  return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stream/session0.sim b/tests/script/tsim/stream/session0.sim
new file mode 100644
index 0000000000000000000000000000000000000000..46b343632abd0347502b86e0978f2afd22c139a8
--- /dev/null
+++ b/tests/script/tsim/stream/session0.sim
@@ -0,0 +1,162 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sleep 50
+sql connect
+
+print =============== create database
+sql create database test vgroups 1
+sql show databases
+if $rows != 3 then
+  return -1
+endi
+
+print $data00 $data01 $data02
+
+sql use test
+
+
+sql create table t1(ts timestamp, a int, b int , c int, d double,id int);
+sql create stream streams2 trigger at_once into streamt as select _wstartts, count(*) c1, sum(a), max(a), min(d), stddev(a), last(a), first(d), max(id) s from t1 session(ts,10s);
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL,1);
+sql insert into t1 values(1648791223001,10,2,3,1.1,2);
+sql insert into t1 values(1648791233002,3,2,3,2.1,3);
+sql insert into t1 values(1648791243003,NULL,NULL,NULL,NULL,4);
+sql insert into t1 values(1648791213002,NULL,NULL,NULL,NULL,5) (1648791233012,NULL,NULL,NULL,NULL,6);
+
+sql select * from streamt order by s desc;
+
+# row 0
+if $data01 != 3 then
+  print ======$data01
+  return -1
+endi
+
+if $data02 != 3 then
+  print ======$data02
+  return -1
+endi
+
+if $data03 != 3 then
+  print ======$data03
+  return -1
+endi
+
+if $data04 != 2.100000000 then
+  print ======$data04
+  return -1
+endi
+
+if $data05 != 0.000000000 then
+  print ======$data05
+  return -1
+endi
+
+if $data06 != 3 then
+  print ======$data06
+  return -1
+endi
+
+if $data07 != 2.100000000 then
+  print ======$data07
+  return -1
+endi
+
+if $data08 != 6 then
+  print ======$data08
+  return -1
+endi
+
+# row 1
+
+if $data11 != 3 then
+  print ======$data11
+  return -1
+endi
+
+if $data12 != 10 then
+  print ======$data12
+  return -1
+endi
+
+if $data13 != 10 then
+  print ======$data13
+  return -1
+endi
+
+if $data14 != 1.100000000 then
+  print ======$data14
+  return -1
+endi
+
+if $data15 != 0.000000000 then
+  print ======$data15
+  return -1
+endi
+
+if $data16 != 10 then
+  print ======$data16
+  return -1
+endi
+
+if $data17 != 1.100000000 then
+  print ======$data17
+  return -1
+endi
+
+if $data18 != 5 then
+  print ======$data18
+  return -1
+endi
+
+sql insert into t1 values(1648791213000,1,2,3,1.0,7);
+sql insert into t1 values(1648791223001,2,2,3,1.1,8);
+sql insert into t1 values(1648791233002,3,2,3,2.1,9);
+sql insert into t1 values(1648791243003,4,2,3,3.1,10);
+sql insert into t1 values(1648791213002,4,2,3,4.1,11);
+sql insert into t1 values(1648791213002,4,2,3,4.1,12) (1648791223009,4,2,3,4.1,13);
+
+sql select * from streamt order by s desc;
+
+# row 0
+if $data01 != 7 then
+  print ======$data01
+  return -1
+endi
+
+if $data02 != 9 then
+  print ======$data02
+  return -1
+endi
+
+if $data03 != 4 then
+  print ======$data03
+  return -1
+endi
+
+if $data04 != 1.100000000 then
+  print ======$data04
+  return -1
+endi
+
+if $data05 != 0.816496581 then
+  print ======$data05
+  return -1
+endi
+
+if $data06 != 3 then
+  print ======$data06
+  return -1
+endi
+
+if $data07 != 1.100000000 then
+  print ======$data07
+  return -1
+endi
+
+if $data08 != 13 then
+  print ======$data08
+  return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stream/session1.sim b/tests/script/tsim/stream/session1.sim
new file mode 100644
index 0000000000000000000000000000000000000000..a44639ba7a5e17e51e6ac8190d991bfd2edf1a9e
--- /dev/null
+++ b/tests/script/tsim/stream/session1.sim
@@ -0,0 +1,190 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sleep 50
+sql connect
+
+print =============== create database
+sql create database test vgroups 1
+sql show databases
+if $rows != 3 then
+  return -1
+endi
+
+print $data00 $data01 $data02
+
+sql use test
+
+
+sql create table t1(ts timestamp, a int, b int , c int, d double,id int);
+sql create stream streams2 trigger at_once into streamt as select _wstartts, count(*) c1, sum(a), min(b), max(id) s from t1 session(ts,10s);
+sql insert into t1 values(1648791210000,1,1,1,1.1,1);
+sql insert into t1 values(1648791220000,2,2,2,2.1,2);
+sql insert into t1 values(1648791230000,3,3,3,3.1,3);
+sql insert into t1 values(1648791240000,4,4,4,4.1,4);
+
+sql select * from streamt order by s desc;
+
+# row 0
+if $data01 != 4 then
+  print ======$data01
+  return -1
+endi
+
+if $data02 != 10 then
+  print ======$data02
+  return -1
+endi
+
+if $data03 != 1 then
+  print ======$data03
+  return -1
+endi
+
+if $data04 != 4 then
+  print ======$data04
+  return -1
+endi
+
+sql insert into t1 values(1648791250005,5,5,5,5.1,5);
+sql insert into t1 values(1648791260006,6,6,6,6.1,6);
+sql insert into t1 values(1648791270007,7,7,7,7.1,7);
+sql insert into t1 values(1648791240005,5,5,5,5.1,8) (1648791250006,6,6,6,6.1,9);
+
+sql select * from streamt order by s desc;
+
+# row 0
+if $data01 != 8 then
+  print ======$data01
+  return -1
+endi
+
+if $data02 != 32 then
+  print ======$data02
+  return -1
+endi
+
+if $data03 != 1 then
+  print ======$data03
+  return -1
+endi
+
+if $data04 != 9 then
+  print ======$data04
+  return -1
+endi
+
+# row 1
+if $data11 != 1 then
+  print ======$data11
+  return -1
+endi
+
+if $data12 != 7 then
+  print ======$data12
+  return -1
+endi
+
+if $data13 != 7 then
+  print ======$data13
+  return -1
+endi
+
+if $data14 != 7 then
+  print ======$data14
+  return -1
+endi
+
+sql insert into t1 values(1648791280008,7,7,7,7.1,10) (1648791300009,8,8,8,8.1,11);
+sql insert into t1 values(1648791260007,7,7,7,7.1,12) (1648791290008,7,7,7,7.1,13) (1648791290009,8,8,8,8.1,14);
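+# Editor's note (sketch): with session(ts,10s), rows within 10s of an existing
+# window extend it, and windows that come to overlap are merged; the rows
+# inserted below start at 1648791500000, far more than 10s after all earlier
+# data, so they should open new session windows rather than extend the ones
+# verified above. The same grouping can be inspected ad hoc with a plain
+# query, e.g. (assuming the _wstartts pseudo column is accepted here):
+#   select _wstartts, count(*) c1, sum(a), min(b), max(id) from t1 session(ts, 10s);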
+sql insert into t1 values(1648791500000,7,7,7,7.1,15) (1648791520000,8,8,8,8.1,16) (1648791540000,8,8,8,8.1,17); +sql insert into t1 values(1648791530000,8,8,8,8.1,18); +sql insert into t1 values(1648791220000,10,10,10,10.1,19) (1648791290008,2,2,2,2.1,20) (1648791540000,17,17,17,17.1,21) (1648791500001,22,22,22,22.1,22); + +sql select * from streamt order by s desc; + +# row 0 +if $data01 != 2 then + print ======$data01 + return -1 +endi + +if $data02 != 29 then + print ======$data02 + return -1 +endi + +if $data03 != 7 then + print ======$data03 + return -1 +endi + +if $data04 != 22 then + print ======$data04 + return -1 +endi + +# row 1 +if $data11 != 3 then + print ======$data11 + return -1 +endi + +if $data12 != 33 then + print ======$data12 + return -1 +endi + +if $data13 != 8 then + print ======$data13 + return -1 +endi + +if $data14 != 21 then + print ======$data14 + return -1 +endi + +# row 2 +if $data21 != 4 then + print ======$data21 + return -1 +endi + +if $data22 != 25 then + print ======$data22 + return -1 +endi + +if $data23 != 2 then + print ======$data23 + return -1 +endi + +if $data24 != 20 then + print ======$data24 + return -1 +endi + +# row 3 +if $data31 != 10 then + print ======$data31 + return -1 +endi + +if $data32 != 54 then + print ======$data32 + return -1 +endi + +if $data33 != 1 then + print ======$data33 + return -1 +endi + +if $data34 != 19 then + print ======$data34 + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/triggerInterval0.sim b/tests/script/tsim/stream/triggerInterval0.sim new file mode 100644 index 0000000000000000000000000000000000000000..6f1d8f4b7bf88913239ccf1cc3a89fb1dbdf6bc9 --- /dev/null +++ b/tests/script/tsim/stream/triggerInterval0.sim @@ -0,0 +1,185 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print =============== create database +sql create database test vgroups 1 +sql show databases +if $rows != 3 then + return -1 +endi + +print $data00 $data01 $data02 + +sql use test +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger window_close into streamt as select _wstartts, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s); + +sql insert into t1 values(1648791213001,1,2,3,1.0); +sleep 300 +sql select * from streamt; +if $rows != 0 then + print ======$rows + return -1 +endi + +sql insert into t1 values(1648791223001,2,2,3,1.1); +sql insert into t1 values(1648791223002,2,2,3,1.1); +sql insert into t1 values(1648791223003,2,2,3,1.1); +sql insert into t1 values(1648791223001,2,2,3,1.1); +sleep 300 +sql select * from streamt; +if $rows != 1 then + print ======$rows + return -1 +endi + +if $data01 != 1 then + print ======$data01 + return -1 +endi + +sql insert into t1 values(1648791233001,2,2,3,1.1); +sleep 300 +sql select * from streamt; +if $rows != 2 then + print ======$rows + return -1 +endi +if $data01 != 1 then + print ======$data01 + return -1 +endi +if $data11 != 3 then + print ======$data11 + return -1 +endi + +sql insert into t1 values(1648791223004,2,2,3,1.1); +sql insert into t1 values(1648791223004,2,2,3,1.1); +sql insert into t1 values(1648791223005,2,2,3,1.1); +sleep 300 +sql select * from streamt; +if $rows != 2 then + print ======$rows + return -1 +endi +if $data01 != 1 then + print ======$data01 + return -1 +endi +if $data11 != 5 then + print ======$data11 + return -1 +endi + + +sql insert into t1 
values(1648791233002,3,2,3,2.1); +sql insert into t1 values(1648791213002,4,2,3,3.1) +sql insert into t1 values(1648791213002,4,2,3,4.1); +sleep 300 +sql select * from streamt; +if $rows != 2 then + print ======$rows + return -1 +endi +if $data01 != 2 then + print ======$data01 + return -1 +endi +if $data11 != 5 then + print ======$data11 + return -1 +endi + +sql create table t2(ts timestamp, a int, b int , c int, d double); +sql create stream streams2 trigger window_close watermark 20s into streamt2 as select _wstartts, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t2 interval(10s); +sql insert into t2 values(1648791213000,1,2,3,1.0); +sql insert into t2 values(1648791239999,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 0 then + print ======$rows + return -1 +endi + +sql insert into t2 values(1648791240000,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 1 then + print ======$rows + return -1 +endi +if $data01 != 1 then + print ======$data01 + return -1 +endi + +sql insert into t2 values(1648791250001,1,2,3,1.0) (1648791250002,1,2,3,1.0) (1648791250003,1,2,3,1.0) (1648791240000,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 1 then + print ======$rows + return -1 +endi +if $data01 != 1 then + print ======$data01 + return -1 +endi + +sql insert into t2 values(1648791280000,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 4 then + print ======$rows + return -1 +endi +if $data01 != 1 then + print ======$data01 + return -1 +endi +if $data11 != 1 then + print ======$data11 + return -1 +endi +if $data21 != 1 then + print ======$data21 + return -1 +endi +if $data31 != 3 then + print ======$data31 + return -1 +endi + +sql insert into t2 values(1648791250001,1,2,3,1.0) (1648791250002,1,2,3,1.0) (1648791250003,1,2,3,1.0) (1648791280000,1,2,3,1.0) (1648791280001,1,2,3,1.0) (1648791280002,1,2,3,1.0) (1648791310000,1,2,3,1.0) (1648791280001,1,2,3,1.0); +sleep 300 +sql select * from streamt2; + +if $rows != 5 then + print ======$rows + return -1 +endi +if $data01 != 1 then + print ======$data01 + return -1 +endi +if $data11 != 1 then + print ======$data11 + return -1 +endi +if $data21 != 1 then + print ======$data21 + return -1 +endi +if $data31 != 3 then + print ======$data31 + return -1 +endi +if $data41 != 3 then + print ======$data31 + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/stream/triggerSession0.sim b/tests/script/tsim/stream/triggerSession0.sim new file mode 100644 index 0000000000000000000000000000000000000000..fb0666fdcfe847dd25a3e4eb3b66acd16ed09f63 --- /dev/null +++ b/tests/script/tsim/stream/triggerSession0.sim @@ -0,0 +1,105 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print =============== create database +sql create database test vgroups 1 +sql show databases +if $rows != 3 then + return -1 +endi + +print $data00 $data01 $data02 + +sql use test +sql create table t2(ts timestamp, a int, b int , c int, d double); +sql create stream streams2 trigger window_close into streamt2 as select _wstartts, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t2 session(ts, 10s); + +sql insert into t2 values(1648791213000,1,2,3,1.0); +sql insert into t2 values(1648791222999,1,2,3,1.0); +sql insert into t2 values(1648791223000,1,2,3,1.0); +sql insert into t2 values(1648791223001,1,2,3,1.0); +sql insert into t2 
values(1648791233001,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 0 then + print ======$rows + return -1 +endi + +sql insert into t2 values(1648791243002,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 1 then + print ======$rows + return -1 +endi + +if $data01 != 5 then + print ======$data01 + return -1 +endi + +sql insert into t2 values(1648791223001,1,2,3,1.0) (1648791223002,1,2,3,1.0) (1648791222999,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 1 then + print ======$rows + return -1 +endi + +if $data01 != 6 then + print ======$data01 + return -1 +endi + +sql insert into t2 values(1648791233002,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 1 then + print ======$rows + return -1 +endi + +if $data01 != 6 then + print ======$data01 + return -1 +endi + +sql insert into t2 values(1648791253003,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 1 then + print ======$rows + return -1 +endi + +if $data01 != 8 then + print ======$data01 + return -1 +endi + +sql insert into t2 values(1648791243003,1,2,3,1.0) (1648791243002,1,2,3,1.0) (1648791270004,1,2,3,1.0) (1648791280005,1,2,3,1.0) (1648791290006,1,2,3,1.0); +sleep 500 +sql select * from streamt2; +if $rows != 3 then + print ======$rows + return -1 +endi + +if $data01 != 10 then + print ======$data01 + return -1 +endi +if $data11 != 1 then + print ======$data11 + return -1 +endi +if $data21 != 1 then + print ======$data21 + return -1 +endi + +#system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/sync/3Replica1VgElect.sim b/tests/script/tsim/sync/3Replica1VgElect.sim new file mode 100644 index 0000000000000000000000000000000000000000..61b3b09288faecf857c5d33e7a34ac3544c4db67 --- /dev/null +++ b/tests/script/tsim/sync/3Replica1VgElect.sim @@ -0,0 +1,478 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/deploy.sh -n dnode4 -i 4 + +system sh/cfg.sh -n dnode1 -c supportVnodes -v 0 + +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +system sh/exec.sh -n dnode4 -s start + +$loop_cnt = 0 +check_dnode_ready: + $loop_cnt = $loop_cnt + 1 + sleep 200 + if $loop_cnt == 10 then + print ====> dnode not ready! + return -1 + endi +sql show dnodes +print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] +print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] +if $data[0][0] != 1 then + return -1 +endi +if $data[0][4] != ready then + goto check_dnode_ready +endi + +sql connect +sql create dnode $hostname port 7200 +sql create dnode $hostname port 7300 +sql create dnode $hostname port 7400 + +$loop_cnt = 0 +check_dnode_ready_1: +$loop_cnt = $loop_cnt + 1 +sleep 200 +if $loop_cnt == 10 then + print ====> dnodes not ready! 
+  return -1
+endi
+sql show dnodes
+print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
+if $data[0][4] != ready then
+  goto check_dnode_ready_1
+endi
+if $data[1][4] != ready then
+  goto check_dnode_ready_1
+endi
+if $data[2][4] != ready then
+  goto check_dnode_ready_1
+endi
+if $data[3][4] != ready then
+  goto check_dnode_ready_1
+endi
+
+$replica = 3
+$vgroups = 1
+
+print ============= create database
+sql create database db replica $replica vgroups $vgroups
+
+$loop_cnt = 0
+check_db_ready:
+$loop_cnt = $loop_cnt + 1
+sleep 200
+if $loop_cnt == 100 then
+  print ====> db not ready!
+  return -1
+endi
+sql show databases
+print ===> rows: $rows
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][10] $data[2][11] $data[2][12] $data[2][13] $data[2][14] $data[2][15] $data[2][16] $data[2][17] $data[2][18] $data[2][19]
+if $rows != 3 then
+  return -1
+endi
+if $data[2][19] != ready then
+  goto check_db_ready
+endi
+
+sql use db
+
+$loop_cnt = 0
+check_vg_ready:
+$loop_cnt = $loop_cnt + 1
+sleep 200
+if $loop_cnt == 300 then
+  print ====> vgroups not ready!
+  return -1
+endi
+
+sql show vgroups
+print ===> rows: $rows
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11]
+
+if $rows != $vgroups then
+  return -1
+endi
+
+if $data[0][4] == LEADER then
+  if $data[0][6] == FOLLOWER then
+    if $data[0][8] == FOLLOWER then
+      print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
+    endi
+  endi
+elif $data[0][6] == LEADER then
+  if $data[0][4] == FOLLOWER then
+    if $data[0][8] == FOLLOWER then
+      print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
+    endi
+  endi
+elif $data[0][8] == LEADER then
+  if $data[0][4] == FOLLOWER then
+    if $data[0][6] == FOLLOWER then
+      print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
+    endi
+  endi
+else
+  goto check_vg_ready
+endi
+
+
+vg_ready:
+print ====> create stable/child table
+sql create table stb (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int)
+
+sql show stables
+if $rows != 1 then
+  return -1
+endi
+
+$ctbPrefix = ctb
+$ntbPrefix = ntb
+$tbNum = 10
+$i = 0
+while $i < $tbNum
+  $ctb = $ctbPrefix . $i
+  sql create table $ctb using stb tags( $i )
+  $ntb = $ntbPrefix . $i
+  sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10))
+  $i = $i + 1
+endw
+
+$totalTblNum = $tbNum * 2
+sleep 1000
+sql show tables
+print ====> expect $totalTblNum tables, and in fact $rows are created
+if $rows != $totalTblNum then
+  return -1
+endi
+
+start_switch_leader:
+
+$switch_loop_cnt = 0
+sql show vgroups
+$dnodeId = $data[0][3]
+$dnodeId = dnode . $dnodeId
+
+switch_leader_to_offine_loop:
+
+print $dnodeId
+print ====> stop $dnodeId
+system sh/exec.sh -n $dnodeId -s stop -x SIGINT
+
+
+$loop_cnt = 0
+$loop_cnt = $loop_cnt + 1
+sleep 201
+if $loop_cnt == 300 then
+  print ====> vgroups switch fail!!!
+ return -1 +endi +sql show vgroups +print ===> rows: $rows +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] + +if $rows != $vgroups then + return -1 +endi + + +vg_offline_1: + +print ====> start $dnodeId +system sh/exec.sh -n $dnodeId -s start + +$switch_loop_cnt = $switch_loop_cnt + 1 +print $switch_loop_cnt + +if $switch_loop_cnt == 1 then + sql show vgroups + $dnodeId = $data[1][3] + $dnodeId = dnode . $dnodeId + goto switch_leader_to_offine_loop +elif $switch_loop_cnt == 2 then + sql show vgroups + $dnodeId = $data[2][3] + $dnodeId = dnode . $dnodeId + goto switch_leader_to_offine_loop +elif $switch_loop_cnt == 3 then + sql show vgroups + $dnodeId = $data[3][3] + $dnodeId = dnode . $dnodeId + goto switch_leader_to_offine_loop +elif $switch_loop_cnt == 4 then + sql show vgroups + $dnodeId = $data[4][3] + $dnodeId = dnode . $dnodeId + goto switch_leader_to_offine_loop +else + goto stop_leader_to_offine_loop +endi + +stop_leader_to_offine_loop: + +$loop_cnt = 0 +check_vg_ready1: +$loop_cnt = $loop_cnt + 1 +print $loop_cnt +sleep 202 +if $loop_cnt == 300 then + print ====> vgroups not ready! + return -1 +endi + +sql show vgroups +print ===> rows: $rows +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] + +if $rows != $vgroups then + return -1 +endi + +if $data[0][4] == LEADER then + if $data[0][6] == FOLLOWER then + if $data[0][8] == FOLLOWER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][3] + endi + endi +elif $data[0][6] == LEADER then + if $data[0][4] == FOLLOWER then + if $data[0][8] == FOLLOWER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][5] + endi + endi +elif $data[0][8] == LEADER then + if $data[0][4] == FOLLOWER then + if $data[0][6] == FOLLOWER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][7] + endi + endi +else + goto check_vg_ready1 +endi + + +print ====> final test: create stable/child table +sql create table stb1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int) + + +sql show stables +if $rows != 2 then + return -1 +endi + +$ctbPrefix = ctb1 +$ntbPrefix = ntb1 +$tbNum = 10 +$i = 0 +while $i < $tbNum + $ctb = $ctbPrefix . $i + sql create table $ctb using stb1 tags( $i ) + $ntb = $ntbPrefix . $i + sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10)) + $i = $i + 1 +endw + +sleep 1000 +sql show stables +if $rows != 2 then + return -1 +endi + +sql show tables +if $rows != 40 then + return -1 +endi + + + +system sh/deploy.sh -n dnode5 -i 5 +system sh/exec.sh -n dnode5 -s start + +sql connect +sql create dnode $hostname port 7500 + +$loop_cnt = 0 +check_dnode_ready3: + $loop_cnt = $loop_cnt + 1 + sleep 200 + if $loop_cnt == 100 then + print ====> dnode not ready! 
+    return -1
+  endi
+
+sql show dnodes
+print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
+print ===> $rows $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6]
+
+if $rows != 5 then
+  return -1
+endi
+
+if $data[4][4] != ready then
+  goto check_dnode_ready3
+endi
+
+
+
+# restart clusters
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+system sh/exec.sh -n dnode3 -s stop -x SIGINT
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+system sh/exec.sh -n dnode5 -s stop -x SIGINT
+
+
+
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+system sh/exec.sh -n dnode5 -s start
+
+
+$loop_cnt = 0
+check_dnode_ready_2:
+  $loop_cnt = $loop_cnt + 1
+  sleep 200
+  if $loop_cnt == 10 then
+    print ====> dnode not ready!
+    return -1
+  endi
+sql show dnodes
+print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
+if $data[0][0] != 1 then
+  return -1
+endi
+
+if $data[0][4] != ready then
+  goto check_dnode_ready_2
+endi
+if $data[1][4] != ready then
+  goto check_dnode_ready_2
+endi
+if $data[2][4] != ready then
+  goto check_dnode_ready_2
+endi
+if $data[3][4] != ready then
+  goto check_dnode_ready_2
+endi
+
+sql use db;
+$ctbPrefix = ctb2
+$ntbPrefix = ntb2
+$tbNum = 10
+$i = 0
+while $i < $tbNum
+  $ctb = $ctbPrefix . $i
+  sql create table $ctb using stb1 tags( $i )
+  $ntb = $ntbPrefix . $i
+  sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10))
+  $i = $i + 1
+endw
+
+sleep 1000
+sql use db
+sql show stables
+if $rows != 2 then
+  return -1
+endi
+
+sql show tables
+print $rows
+if $rows != 60 then
+  return -1
+endi
+
+
+
+$replica = 3
+$vgroups = 5
+
+print ============= create database
+sql create database db1 replica $replica vgroups $vgroups
+
+$loop_cnt = 0
+check_db_ready1:
+$loop_cnt = $loop_cnt + 1
+sleep 200
+if $loop_cnt == 100 then
+  print ====> db1 not ready!
+  return -1
+endi
+sql show databases
+print ===> rows: $rows
+print $data(db1)[0] $data(db1)[1] $data(db1)[2] $data(db1)[3] $data(db1)[4] $data(db1)[5] $data(db1)[6] $data(db1)[7] $data(db1)[8] $data(db1)[9] $data(db1)[10] $data(db1)[11] $data(db1)[12] $data(db1)[13] $data(db1)[14] $data(db1)[15] $data(db1)[16] $data(db1)[17] $data(db1)[18] $data(db1)[19]
+if $rows != 4 then
+  return -1
+endi
+if $data(db1)[19] != ready then
+  goto check_db_ready1
+endi
+
+
+sql use db1
+
+$loop_cnt = 0
+check_vg_ready3:
+$loop_cnt = $loop_cnt + 1
+print $loop_cnt
+sleep 202
+if $loop_cnt == 300 then
+  print ====> vgroups not ready!
+ return -1 +endi + +sql show vgroups +print ===> rows: $rows +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] +if $rows != $vgroups then + return -1 +endi + +if $data[0][4] == LEADER then + if $data[0][6] == FOLLOWER then + if $data[0][8] == FOLLOWER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][3] + endi + endi +elif $data[0][6] == LEADER then + if $data[0][4] == FOLLOWER then + if $data[0][8] == FOLLOWER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][5] + endi + endi +elif $data[0][8] == LEADER then + if $data[0][4] == FOLLOWER then + if $data[0][6] == FOLLOWER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][7] + endi + endi +else + goto check_vg_ready3 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT + + + diff --git a/tests/script/tsim/sync/3Replica5VgElect.sim b/tests/script/tsim/sync/3Replica5VgElect.sim new file mode 100644 index 0000000000000000000000000000000000000000..4041263e55fa06b93ecea9930ba1dbd728579ce7 --- /dev/null +++ b/tests/script/tsim/sync/3Replica5VgElect.sim @@ -0,0 +1,755 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/deploy.sh -n dnode4 -i 4 + +system sh/cfg.sh -n dnode1 -c supportVnodes -v 0 + +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +system sh/exec.sh -n dnode4 -s start + +$loop_cnt = 0 +check_dnode_ready: + $loop_cnt = $loop_cnt + 1 + sleep 200 + if $loop_cnt == 10 then + print ====> dnode not ready! + return -1 + endi +sql show dnodes +print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] +print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] +if $data[0][0] != 1 then + return -1 +endi +if $data[0][4] != ready then + goto check_dnode_ready +endi + +sql connect +sql create dnode $hostname port 7200 +sql create dnode $hostname port 7300 +sql create dnode $hostname port 7400 + +$loop_cnt = 0 +check_dnode_ready_1: +$loop_cnt = $loop_cnt + 1 +sleep 200 +if $loop_cnt == 10 then + print ====> dnodes not ready! 
+ return -1 +endi +sql show dnodes +print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] +print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] +if $data[0][4] != ready then + goto check_dnode_ready_1 +endi +if $data[1][4] != ready then + goto check_dnode_ready_1 +endi +if $data[2][4] != ready then + goto check_dnode_ready_1 +endi +if $data[3][4] != ready then + goto check_dnode_ready_1 +endi + +$replica = 3 +$vgroups = 5 + +print ============= create database +sql create database db replica $replica vgroups $vgroups + +$loop_cnt = 0 +check_db_ready: +$loop_cnt = $loop_cnt + 1 +sleep 200 +if $loop_cnt == 100 then + print ====> db not ready! + return -1 +endi +sql show databases +print ===> rows: $rows +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][6] $data[2][11] $data[2][12] $data[2][13] $data[2][14] $data[2][15] $data[2][16] $data[2][17] $data[2][18] $data[2][19] +if $rows != 3 then + return -1 +endi +if $data[2][19] != ready then + goto check_db_ready +endi + +sql use db + +$loop_cnt = 0 +check_vg_ready: +$loop_cnt = $loop_cnt + 1 +sleep 200 +if $loop_cnt == 300 then + print ====> vgroups not ready! + return -1 +endi + +sql show vgroups +print ===> rows: $rows +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] $data[1][7] $data[1][8] $data[1][9] $data[1][10] $data[1][11] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][10] $data[2][11] +print $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] $data[3][7] $data[3][8] $data[3][9] $data[3][10] $data[3][11] +print $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6] $data[4][7] $data[4][8] $data[4][9] $data[4][10] $data[4][11] +if $rows != $vgroups then + return -1 +endi + +if $data[0][4] == LEADER then + if $data[0][6] == FOLLOWER then + if $data[0][8] == FOLLOWER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][3] + endi + endi +elif $data[0][6] == LEADER then + if $data[0][4] == FOLLOWER then + if $data[0][8] == FOLLOWER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][5] + endi + endi +elif $data[0][8] == LEADER then + if $data[0][4] == FOLLOWER then + if $data[0][6] == FOLLOWER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][7] + endi + endi +else + goto check_vg_ready +endi + +if $data[1][4] == LEADER then + if $data[1][6] == FOLLOWER then + if $data[1][8] == FOLLOWER then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][3] + endi + endi +elif $data[1][6] == LEADER then + if $data[1][4] == FOLLOWER then + if $data[1][8] == FOLLOWER then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][5] + endi + endi +elif $data[1][8] == LEADER then + if $data[1][4] == FOLLOWER then + if $data[1][6] == FOLLOWER then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][7] + endi + endi +else + goto check_vg_ready +endi + +if 
$data[2][4] == LEADER then + if $data[2][6] == FOLLOWER then + if $data[2][8] == FOLLOWER then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][3] + endi + endi +elif $data[2][6] == LEADER then + if $data[2][4] == FOLLOWER then + if $data[2][8] == FOLLOWER then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][5] + endi + endi +elif $data[2][8] == LEADER then + if $data[2][4] == FOLLOWER then + if $data[2][6] == FOLLOWER then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][7] + endi + endi +else + goto check_vg_ready +endi + +if $data[3][4] == LEADER then + if $data[3][6] == FOLLOWER then + if $data[3][8] == FOLLOWER then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][3] + endi + endi +elif $data[3][6] == LEADER then + if $data[3][4] == FOLLOWER then + if $data[3][8] == FOLLOWER then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][5] + endi + endi +elif $data[3][8] == LEADER then + if $data[3][4] == FOLLOWER then + if $data[3][6] == FOLLOWER then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][7] + endi + endi +else + goto check_vg_ready +endi + +if $data[4][4] == LEADER then + if $data[4][6] == FOLLOWER then + if $data[4][8] == FOLLOWER then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][3] + endi + endi +elif $data[4][6] == LEADER then + if $data[4][4] == FOLLOWER then + if $data[4][8] == FOLLOWER then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][5] + endi + endi +elif $data[4][8] == LEADER then + if $data[4][4] == FOLLOWER then + if $data[4][6] == FOLLOWER then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][7] + endi + endi +else + goto check_vg_ready +endi + +vg_ready: +print ====> create stable/child table +sql create table stb (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int) + +sql show stables +if $rows != 1 then + return -1 +endi + +$ctbPrefix = ctb +$ntbPrefix = ntb +$tbNum = 10 +$i = 0 +while $i < $tbNum + $ctb = $ctbPrefix . $i + sql create table $ctb using stb tags( $i ) + $ntb = $ntbPrefix . $i + sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10)) + $i = $i + 1 +endw + +$totalTblNum = $tbNum * 2 +sleep 1000 +sql show tables +print ====> expect $totalTblNum and infinsert $rows in fact +if $rows != $totalTblNum then + return -1 +endi + +start_switch_leader: + +$switch_loop_cnt = 0 +sql show vgroups +$dnodeId = $data[0][3] +$dnodeId = dnode . $dnodeId + +switch_leader_to_offine_loop: + +print $dnodeId +print ====> stop $dnodeId +system sh/exec.sh -n $dnodeId -s stop -x SIGINT + + +$loop_cnt = 0 +$loop_cnt = $loop_cnt + 1 +sleep 201 +if $loop_cnt == 300 then + print ====> vgroups switch fail!!! 
+ return -1 +endi +sql show vgroups +print ===> rows: $rows +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] $data[1][7] $data[1][8] $data[1][9] $data[1][10] $data[1][11] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][10] $data[2][11] +print $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] $data[3][7] $data[3][8] $data[3][9] $data[3][10] $data[3][11] +print $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6] $data[4][7] $data[4][8] $data[4][9] $data[4][10] $data[4][11] +if $rows != $vgroups then + return -1 +endi + + +vg_offline_1: + +print ====> start $dnodeId +system sh/exec.sh -n $dnodeId -s start + +$switch_loop_cnt = $switch_loop_cnt + 1 +print $switch_loop_cnt + +if $switch_loop_cnt == 1 then + sql show vgroups + $dnodeId = $data[1][3] + $dnodeId = dnode . $dnodeId + goto switch_leader_to_offine_loop +elif $switch_loop_cnt == 2 then + sql show vgroups + $dnodeId = $data[2][3] + $dnodeId = dnode . $dnodeId + goto switch_leader_to_offine_loop +elif $switch_loop_cnt == 3 then + sql show vgroups + $dnodeId = $data[3][3] + $dnodeId = dnode . $dnodeId + goto switch_leader_to_offine_loop +elif $switch_loop_cnt == 4 then + sql show vgroups + $dnodeId = $data[4][3] + $dnodeId = dnode . $dnodeId + goto switch_leader_to_offine_loop +else + goto stop_leader_to_offine_loop +endi + +stop_leader_to_offine_loop: + +$loop_cnt = 0 +check_vg_ready1: +$loop_cnt = $loop_cnt + 1 +print $loop_cnt +sleep 202 +if $loop_cnt == 300 then + print ====> vgroups not ready! 
+ return -1 +endi + +sql show vgroups +print ===> rows: $rows +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] $data[1][7] $data[1][8] $data[1][9] $data[1][10] $data[1][11] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][10] $data[2][11] +print $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] $data[3][7] $data[3][8] $data[3][9] $data[3][10] $data[3][11] +print $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6] $data[4][7] $data[4][8] $data[4][9] $data[4][10] $data[4][11] +if $rows != $vgroups then + return -1 +endi + +if $data[0][4] == LEADER then + if $data[0][6] == FOLLOWER then + if $data[0][8] == FOLLOWER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][3] + endi + endi +elif $data[0][6] == LEADER then + if $data[0][4] == FOLLOWER then + if $data[0][8] == FOLLOWER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][5] + endi + endi +elif $data[0][8] == LEADER then + if $data[0][4] == FOLLOWER then + if $data[0][6] == FOLLOWER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][7] + endi + endi +else + goto check_vg_ready1 +endi + +if $data[1][4] == LEADER then + if $data[1][6] == FOLLOWER then + if $data[1][8] == FOLLOWER then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][3] + endi + endi +elif $data[1][6] == LEADER then + if $data[1][4] == FOLLOWER then + if $data[1][8] == FOLLOWER then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][5] + endi + endi +elif $data[1][8] == LEADER then + if $data[1][4] == FOLLOWER then + if $data[1][6] == FOLLOWER then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][7] + endi + endi +else + goto check_vg_ready1 +endi + +if $data[2][4] == LEADER then + if $data[2][6] == FOLLOWER then + if $data[2][8] == FOLLOWER then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][3] + endi + endi +elif $data[2][6] == LEADER then + if $data[2][4] == FOLLOWER then + if $data[2][8] == FOLLOWER then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][5] + endi + endi +elif $data[2][8] == LEADER then + if $data[2][4] == FOLLOWER then + if $data[2][6] == FOLLOWER then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][7] + endi + endi +else + goto check_vg_ready1 +endi + +if $data[3][4] == LEADER then + if $data[3][6] == FOLLOWER then + if $data[3][8] == FOLLOWER then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][3] + endi + endi +elif $data[3][6] == LEADER then + if $data[3][4] == FOLLOWER then + if $data[3][8] == FOLLOWER then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][5] + endi + endi +elif $data[3][8] == LEADER then + if $data[3][4] == FOLLOWER then + if $data[3][6] == FOLLOWER then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][7] + endi + endi +else + goto check_vg_ready1 +endi + +if $data[4][4] == LEADER then + if $data[4][6] == FOLLOWER then + if $data[4][8] == FOLLOWER then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][3] + endi + endi +elif $data[4][6] == LEADER then + if $data[4][4] == FOLLOWER then + if $data[4][8] == FOLLOWER then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][5] + endi + 
endi +elif $data[4][8] == LEADER then + if $data[4][4] == FOLLOWER then + if $data[4][6] == FOLLOWER then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][7] + endi + endi +else + goto check_vg_ready1 +endi + + +print ====> final test: create stable/child table +sql create table stb1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int) + + +sql show stables +if $rows != 2 then + return -1 +endi + +$ctbPrefix = ctb1 +$ntbPrefix = ntb1 +$tbNum = 10 +$i = 0 +while $i < $tbNum + $ctb = $ctbPrefix . $i + sql create table $ctb using stb1 tags( $i ) + $ntb = $ntbPrefix . $i + sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10)) + $i = $i + 1 +endw + +sleep 1000 +sql show stables +if $rows != 2 then + return -1 +endi + +sql show tables +if $rows != 40 then + return -1 +endi + + + +system sh/deploy.sh -n dnode5 -i 5 +system sh/exec.sh -n dnode5 -s start + +sql connect +sql create dnode $hostname port 7500 + +$loop_cnt = 0 +check_dnode_ready3: + $loop_cnt = $loop_cnt + 1 + sleep 200 + if $loop_cnt == 100 then + print ====> dnode not ready! + return -1 + endi + +sql show dnodes +print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] +print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] +print ===> $rows $data[4][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] + +if $rows != 5 then + return -1 +endi + +if $data[4][4] != ready then + goto check_dnode_ready3 +endi + + + +# restart clusters + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT +system sh/exec.sh -n dnode5 -s stop -x SIGINT + + + +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +system sh/exec.sh -n dnode4 -s start +system sh/exec.sh -n dnode5 -s start + + +$loop_cnt = 0 +check_dnode_ready_2: + $loop_cnt = $loop_cnt + 1 + sleep 200 + if $loop_cnt == 10 then + print ====> dnode not ready! + return -1 + endi +sql show dnodes +print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] +print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] +if $data[0][0] != 1 then + return -1 +endi + +if $data[0][4] != ready then + goto check_dnode_ready_2 +endi +if $data[1][4] != ready then + goto check_dnode_ready_2 +endi +if $data[2][4] != ready then + goto check_dnode_ready_2 +endi +if $data[3][4] != ready then + goto check_dnode_ready_2 +endi + +sql use db; +$ctbPrefix = ctb2 +$ntbPrefix = ntb2 +$tbNum = 10 +$i = 0 +while $i < $tbNum + $ctb = $ctbPrefix . $i + sql create table $ctb using stb1 tags( $i ) + $ntb = $ntbPrefix . 
$i + sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10)) + $i = $i + 1 +endw + +sleep 1000 +sql use db +sql show stables +if $rows != 2 then + return -1 +endi + +sql show tables +print $rows +if $rows != 60 then + return -1 +endi + + + +$replica = 3 +$vgroups = 5 + +print ============= create database +sql create database db1 replica $replica vgroups $vgroups + +$loop_cnt = 0 +check_db_ready1: +$loop_cnt = $loop_cnt + 1 +sleep 200 +if $loop_cnt == 100 then + print ====> db not ready! + return -1 +endi +sql show databases +print ===> rows: $rows +print $data(db1)[0] $data(db1)[1] $data(db1)[2] $data(db1)[3] $data(db1)[4] $data(db1)[5] $data(db1)[6] $data(db1)[7] $data(db1)[8] $data(db1)[9] $data(db1)[6] $data(db1)[11] $data(db1)[12] $data(db1)[13] $data(db1)[14] $data(db1)[15] $data(db1)[16] $data(db1)[17] $data(db1)[18] $data(db1)[19] +if $rows != 4 then + return -1 +endi +if $data(db1)[19] != ready then + goto check_db_ready1 +endi + + +sql use db1 + +$loop_cnt = 0 +check_vg_ready3: +$loop_cnt = $loop_cnt + 1 +print $loop_cnt +sleep 202 +if $loop_cnt == 300 then + print ====> vgroups not ready! + return -1 +endi + +sql show vgroups +print ===> rows: $rows +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] $data[1][7] $data[1][8] $data[1][9] $data[1][10] $data[1][11] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][10] $data[2][11] +print $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] $data[3][7] $data[3][8] $data[3][9] $data[3][10] $data[3][11] +print $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6] $data[4][7] $data[4][8] $data[4][9] $data[4][10] $data[4][11] +if $rows != $vgroups then + return -1 +endi + +if $data[0][4] == LEADER then + if $data[0][6] == FOLLOWER then + if $data[0][8] == FOLLOWER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][3] + endi + endi +elif $data[0][6] == LEADER then + if $data[0][4] == FOLLOWER then + if $data[0][8] == FOLLOWER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][5] + endi + endi +elif $data[0][8] == LEADER then + if $data[0][4] == FOLLOWER then + if $data[0][6] == FOLLOWER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][7] + endi + endi +else + goto check_vg_ready3 +endi + +if $data[1][4] == LEADER then + if $data[1][6] == FOLLOWER then + if $data[1][8] == FOLLOWER then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][3] + endi + endi +elif $data[1][6] == LEADER then + if $data[1][4] == FOLLOWER then + if $data[1][8] == FOLLOWER then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][5] + endi + endi +elif $data[1][8] == LEADER then + if $data[1][4] == FOLLOWER then + if $data[1][6] == FOLLOWER then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][7] + endi + endi +else + goto check_vg_ready3 +endi + +if $data[2][4] == LEADER then + if $data[2][6] == FOLLOWER then + if $data[2][8] == FOLLOWER then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][3] + endi + endi +elif $data[2][6] == LEADER then + if $data[2][4] == FOLLOWER then + if $data[2][8] == FOLLOWER then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][5] + endi + endi +elif $data[2][8] == 
LEADER then + if $data[2][4] == FOLLOWER then + if $data[2][6] == FOLLOWER then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][7] + endi + endi +else + goto check_vg_ready3 +endi + +if $data[3][4] == LEADER then + if $data[3][6] == FOLLOWER then + if $data[3][8] == FOLLOWER then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][3] + endi + endi +elif $data[3][6] == LEADER then + if $data[3][4] == FOLLOWER then + if $data[3][8] == FOLLOWER then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][5] + endi + endi +elif $data[3][8] == LEADER then + if $data[3][4] == FOLLOWER then + if $data[3][6] == FOLLOWER then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][7] + endi + endi +else + goto check_vg_ready3 +endi + +if $data[4][4] == LEADER then + if $data[4][6] == FOLLOWER then + if $data[4][8] == FOLLOWER then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][3] + endi + endi +elif $data[4][6] == LEADER then + if $data[4][4] == FOLLOWER then + if $data[4][8] == FOLLOWER then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][5] + endi + endi +elif $data[4][8] == LEADER then + if $data[4][4] == FOLLOWER then + if $data[4][6] == FOLLOWER then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][7] + endi + endi +else + goto check_vg_ready3 +endi + +# sql drop dnode 5 + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT + + diff --git a/tests/script/tsim/sync/insertDataByRunBack.sim b/tests/script/tsim/sync/insertDataByRunBack.sim index c86cd3844bd3258b5cac4f7b4bbe5dd1c3e0dec2..00f0643b61c3066de4d3bda25f60c54a9cf22084 100644 --- a/tests/script/tsim/sync/insertDataByRunBack.sim +++ b/tests/script/tsim/sync/insertDataByRunBack.sim @@ -20,6 +20,8 @@ print $data[1][0] $data[1][1] $data[1][2] $data[1][3] if $rows == 2 then if $data[1][1] == stop then goto end_insert + elif $data[0][1] == stop then + goto end_insert endi endi @@ -47,6 +49,9 @@ endw if $loop_cnt == 0 then print ====> notify main to working for insert data sql insert into interaction values (now, 'working', 0, 0); + sql select * from interaction + print $data[0][0] $data[0][1] $data[0][2] $data[0][3] + print $data[1][0] $data[1][1] $data[1][2] $data[1][3] endi $loop_cnt = $loop_cnt + 1 goto loop_insert diff --git a/tests/script/tsim/sync/oneReplica1VgElect.sim b/tests/script/tsim/sync/oneReplica1VgElect.sim index bb9b3f449640818d888137721350b0cea90eebae..d98b823192b82556f0327f5107d2d359176e19cb 100644 --- a/tests/script/tsim/sync/oneReplica1VgElect.sim +++ b/tests/script/tsim/sync/oneReplica1VgElect.sim @@ -31,7 +31,7 @@ if $data[0][4] != ready then goto check_dnode_ready endi -#sql connect +sql connect sql create dnode $hostname port 7200 sql create dnode $hostname port 7300 sql create dnode $hostname port 7400 @@ -66,139 +66,94 @@ $vgroups = 1 $replica = 1 print ============= create database -sql create database db replica $replica vgroups $vgroups +sql create database db1 replica $replica vgroups $vgroups $loop_cnt = 0 check_db_ready: $loop_cnt = $loop_cnt + 1 sleep 200 -if $loop_cnt == 10 then - print ====> db not ready! +if $loop_cnt == 100 then + print ====> db1 not ready! 
return -1 endi sql show databases print ===> rows: $rows -print $data(db)[0] $data(db)[1] $data(db)[2] $data(db)[3] $data(db)[4] $data(db)[5] $data(db)[6] $data(db)[7] $data(db)[8] $data(db)[9] $data(db)[10] $data(db)[11] $data(db)[12] +print $data(db1)[0] $data(db1)[1] $data(db1)[2] $data(db1)[3] $data(db1)[4] $data(db1)[5] $data(db1)[6] $data(db1)[7] $data(db1)[8] $data(db1)[9] $data(db1)[10] $data(db1)[11] $data(db1)[12] print $data(db)[13] $data(db)[14] $data(db)[15] $data(db)[16] $data(db)[17] $data(db)[18] $data(db)[19] $data(db)[20] if $rows != 3 then return -1 endi -if $data(db)[19] != ready then +if $data(db1)[19] != ready then goto check_db_ready endi -sql use db +sql use db1 $loop_cnt = 0 check_vg_ready: $loop_cnt = $loop_cnt + 1 sleep 200 -if $loop_cnt == 10 then +if $loop_cnt == 300 then print ====> vgroups not ready! return -1 endi sql show vgroups print ===> rows: $rows -print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13] -print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[10][6] $data[0][11] $data[0][12] $data[0][13] +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] $data[0][12] $data[0][13] if $rows != $vgroups then return -1 endi if $data[0][4] == LEADER then - if $data[0][6] != NULL then - goto check_vg_ready - endi - if $data[0][8] != NULL then - goto check_vg_ready - endi print ---- vgroup $data[0][0] leader locate on dnode $data[0][3] - goto vg_ready -endi -if $data[0][6] == LEADER then - if $data[0][4] != NULL then - goto check_vg_ready - endi - if $data[0][8] != NULL then - goto check_vg_ready - endi + goto vg_ready +elif $data[0][6] == LEADER then print ---- vgroup $data[0][0] leader locate on dnode $data[0][5] - goto vg_ready -endi -if $data[0][8] == LEADER then - if $data[0][4] != NULL then - goto check_vg_ready - endi - if $data[0][6] != NULL then - goto check_vg_ready - endi + goto vg_ready +elif $data[0][8] == LEADER then print ---- vgroup $data[0][0] leader locate on dnode $data[0][7] - goto vg_ready + goto vg_ready +else + goto check_vg_ready endi -vg_ready: -print ====> create stable/child table, insert data, and select -sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int) +vg_ready: +print ====> create stable/child table +sql create table stb (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int) sql show stables if $rows != 1 then return -1 endi + $ctbPrefix = ctb $ntbPrefix = ntb $tbNum = 10 -$rowNum = 10 -$tstart = 1640966400000 # 2022-01-01 00:00:00.000 - $i = 0 while $i < $tbNum $ctb = $ctbPrefix . $i sql create table $ctb using stb tags( $i ) $ntb = $ntbPrefix . $i sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10)) - - $x = 0 - while $x < $rowNum - $binary = ' . binary - $binary = $binary . $i - $binary = $binary . 
' - - sql insert into $ctb values ($tstart , $i , $x , $binary ) - sql insert into $ntb values ($tstart , 999 , 999 , 'binary-ntb' ) - $tstart = $tstart + 1 - $x = $x + 1 - endw - - print ====> insert rows: $rowNum into $ctb and $ntb - $i = $i + 1 - $tstart = 1640966400000 endw $totalTblNum = $tbNum * 2 +sleep 1000 sql show tables +print ====> expect $totalTblNum tables and get $rows in fact if $rows != $totalTblNum then return -1 endi -sql select count(*) from ntb0 -print rows: $rows -print $data[0][0] $data[0][1] -if $data[0][0] != $rowNum then - return -1 -endi +start_switch_leader: -$totalRowsOfStb = $rowNum * $tbNum -sql select count(*) from stb -print rows: $rows -print $data[0][0] $data[0][1] -if $data[0][0] != $totalRowsOfStb then - return -1 -endi +$switch_loop_cnt = 0 +switch_leader_to_offline_loop: print ====> find vnode of leader, and stop the dnode where the vnode is located, and query stb/ntb count(*) sql show vgroups -print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13] +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] $data[0][12] $data[0][13] if $data[0][4] == LEADER then $dnodeId = $data[0][3] elif $data[0][6] == LEADER then @@ -213,148 +168,78 @@ endi $dnodeId = dnode . $dnodeId print ====> stop $dnodeId system sh/exec.sh -n $dnodeId -s stop -x SIGINT +#print ====> start $dnodeId +#system sh/exec.sh -n $dnodeId -s start $loop_cnt = 0 check_vg_ready_2: $loop_cnt = $loop_cnt + 1 sleep 200 -if $loop_cnt == 10 then +if $loop_cnt == 300 then print ====> vgroups switch fail!!! return -1 endi sql show vgroups print ===> rows: $rows -print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13] -print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[10][6] $data[0][11] $data[0][12] $data[0][13] +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] $data[0][12] $data[0][13] if $rows != $vgroups then return -1 endi -if $data[0][4] == LEADER then - if $data[0][6] != NULL then - goto check_vg_ready_2 - endi - if $data[0][8] != NULL then - goto check_vg_ready_2 - endi - print ---- vgroup $data[0][0] leader switch to dnode $data[0][3] - goto vg_ready_2 -endi -if $data[0][6] == LEADER then - if $data[0][4] != NULL then - goto check_vg_ready_2 - endi - if $data[0][8] != NULL then - goto check_vg_ready_2 - endi - print ---- vgroup $data[0][0] leader switch to dnode $data[0][5] - goto vg_ready_2 -endi -if $data[0][8] == LEADER then - if $data[0][4] != NULL then - goto check_vg_ready_2 - endi - if $data[0][6] != NULL then - goto check_vg_ready_2 - endi - print ---- vgroup $data[0][0] leader switch to dnode $data[0][7] - goto vg_ready_2 -endi -vg_ready_2: - -sql select count(*) from ntb0 -print rows: $rows -print $data[0][0] $data[0][1] -if $data[0][0] != $rowNum then - return -1 -endi -sql select count(*) from ctb0 -print rows: $rows -print $data[0][0] $data[0][1] -if $data[0][0] != $rowNum then - return -1 -endi +if $data[0][4] == OFFLINE then + print ---- vgroup $dnodeId leader switch to offline + goto vg_offline_1 +elif $data[0][6] == OFFLINE then + print ---- vgroup $dnodeId 
leader switch to offline + goto vg_offline_1 +elif $data[0][8] == OFFLINE then + print ---- vgroup $dnodeId leader switch to offline + goto vg_offline_1 +else + goto check_vg_ready_2 +endi -sql select count(*) from stb -print rows: $rows -print $data[0][0] $data[0][1] -if $data[0][0] != $totalRowsOfStb then - return -1 -endi +vg_offline_1: -print ====> stop and start all dnode(not include the dnode where mnode is located), then query -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s start -system sh/exec.sh -n dnode3 -s start -system sh/exec.sh -n dnode2 -s start +print ====> start $dnodeId +system sh/exec.sh -n $dnodeId -s start -$loop_cnt = 0 -check_vg_ready_1: -$loop_cnt = $loop_cnt + 1 +$loop_cnt1 = 0 +check_vg1_ready: +$loop_cnt1 = $loop_cnt1 + 1 sleep 200 -if $loop_cnt == 10 then - print ====> after restart dnode, vgroups not ready! +if $loop_cnt1 == 300 then + print ====> vgroups not ready! return -1 endi sql show vgroups print ===> rows: $rows -print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13] -print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[10][6] $data[0][11] $data[0][12] $data[0][13] +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] $data[0][12] $data[0][13] if $rows != $vgroups then return -1 endi if $data[0][4] == LEADER then - if $data[0][6] != NULL then - goto check_vg_ready_1 - endi - if $data[0][8] != NULL then - goto check_vg_ready_1 - endi - goto vg_ready_1 -endi -if $data[0][6] == LEADER then - if $data[0][4] != NULL then - goto check_vg_ready_1 - endi - if $data[0][8] != NULL then - goto check_vg_ready_1 - endi - goto vg_ready_1 -endi -if $data[0][8] == LEADER then - if $data[0][4] != NULL then - goto check_vg_ready_1 - endi - if $data[0][6] != NULL then - goto check_vg_ready_1 - endi - goto vg_ready_1 + print ---- vgroup $data[0][0] leader locate on dnode $data[0][3] + goto continue_loop +elif $data[0][6] == LEADER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][5] + goto continue_loop +elif $data[0][8] == LEADER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][7] + goto continue_loop +else + goto check_vg1_ready endi -vg_ready_1: -print ====> after restart dnode2/dnode3/dnode4, query stb/ntb count(*) -sql select count(*) from ntb0 -print rows: $rows -print $data[0][0] $data[0][1] -if $data[0][0] != $rowNum then - return -1 -endi +continue_loop: -sql select count(*) from ctb0 -print rows: $rows -print $data[0][0] $data[0][1] -if $data[0][0] != $rowNum then - return -1 +$switch_loop_cnt = $switch_loop_cnt + 1 +print $switch_loop_cnt +if $switch_loop_cnt < 4 then + goto switch_leader_to_offline_loop endi -sql select count(*) from stb -print rows: $rows -print $data[0][0] $data[0][1] -if $data[0][0] != $totalRowsOfStb then - return -1 -endi +stop_leader_to_offline_loop: system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode2 -s stop -x SIGINT diff --git a/tests/script/tsim/sync/oneReplica5VgElect.sim b/tests/script/tsim/sync/oneReplica5VgElect.sim new file mode 100644 index 0000000000000000000000000000000000000000..d6d18093c341dfea8d4d2f1c22fa10cab173d71c --- /dev/null +++ 
b/tests/script/tsim/sync/oneReplica5VgElect.sim @@ -0,0 +1,417 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/deploy.sh -n dnode4 -i 4 + +system sh/cfg.sh -n dnode1 -c supportVnodes -v 0 + +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +system sh/exec.sh -n dnode4 -s start + +$loop_cnt = 0 +check_dnode_ready: + $loop_cnt = $loop_cnt + 1 + sleep 200 + if $loop_cnt == 10 then + print ====> dnode not ready! + return -1 + endi +sql show dnodes +print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] +print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] +if $data[0][0] != 1 then + return -1 +endi +if $data[0][4] != ready then + goto check_dnode_ready +endi + +sql connect +sql create dnode $hostname port 7200 +sql create dnode $hostname port 7300 +sql create dnode $hostname port 7400 + +$loop_cnt = 0 +check_dnode_ready_1: +$loop_cnt = $loop_cnt + 1 +sleep 200 +if $loop_cnt == 10 then + print ====> dnodes not ready! + return -1 +endi +sql show dnodes +print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] +print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] +if $data[0][4] != ready then + goto check_dnode_ready_1 +endi +if $data[1][4] != ready then + goto check_dnode_ready_1 +endi +if $data[2][4] != ready then + goto check_dnode_ready_1 +endi +if $data[3][4] != ready then + goto check_dnode_ready_1 +endi + +$replica = 1 +$vgroups = 5 + +print ============= create database +sql create database db1 replica $replica vgroups $vgroups + +$loop_cnt = 0 +check_db_ready: +$loop_cnt = $loop_cnt + 1 +sleep 200 +if $loop_cnt == 100 then + print ====> db1 not ready! + return -1 +endi +sql show databases +print ===> rows: $rows +print $data(db1)[0] $data(db1)[1] $data(db1)[2] $data(db1)[3] $data(db1)[4] $data(db1)[5] $data(db1)[6] $data(db1)[7] $data(db1)[8] $data(db1)[9] $data(db1)[10] $data(db1)[11] $data(db1)[12] +if $rows != 3 then + return -1 +endi +if $data(db1)[19] != ready then + goto check_db_ready +endi + +sql use db1 + +$loop_cnt = 0 +check_vg_ready: +$loop_cnt = $loop_cnt + 1 +sleep 200 +if $loop_cnt == 300 then + print ====> vgroups not ready! 
+ return -1 +endi + +sql show vgroups +print ===> rows: $rows +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] $data[0][12] $data[0][13] +if $rows != $vgroups then + return -1 +endi + +if $data[0][4] == LEADER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][3] +elif $data[0][6] == LEADER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][5] +elif $data[0][8] == LEADER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][7] +else + goto check_vg_ready +endi + +if $data[1][4] == LEADER then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][3] +elif $data[1][6] == LEADER then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][5] +elif $data[1][8] == LEADER then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][7] +else + goto check_vg_ready +endi + +if $data[2][4] == LEADER then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][3] +elif $data[2][6] == LEADER then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][5] +elif $data[2][8] == LEADER then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][7] +else + goto check_vg_ready +endi + +if $data[3][4] == LEADER then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][3] +elif $data[3][6] == LEADER then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][5] +elif $data[3][8] == LEADER then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][7] +else + goto check_vg_ready +endi + +if $data[4][4] == LEADER then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][3] +elif $data[4][6] == LEADER then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][5] +elif $data[4][8] == LEADER then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][7] +else + goto check_vg_ready +endi + +vg_ready: +print ====> create stable/child table +sql create table stb (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int) + +sql show stables +if $rows != 1 then + return -1 +endi + +$ctbPrefix = ctb +$ntbPrefix = ntb +$tbNum = 10 +$i = 0 +while $i < $tbNum + $ctb = $ctbPrefix . $i + sql create table $ctb using stb tags( $i ) + $ntb = $ntbPrefix . $i + sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10)) + $i = $i + 1 +endw + +$totalTblNum = $tbNum * 2 +sleep 1000 +sql show tables +print ====> expect $totalTblNum tables and get $rows in fact +if $rows != $totalTblNum then + return -1 +endi + +start_switch_leader: + +$switch_loop_cnt = 0 +sql show vgroups +$dnodeId = $data[0][3] +$dnodeId = dnode . $dnodeId + +switch_leader_to_offline_loop: + +print $dnodeId +print ====> stop $dnodeId +system sh/exec.sh -n $dnodeId -s stop -x SIGINT + + +$loop_cnt = 0 +check_vg_ready_2: +$loop_cnt = $loop_cnt + 1 +sleep 201 +if $loop_cnt == 300 then + print ====> vgroups switch fail!!! 
+ return -1 +endi +sql show vgroups +print ===> rows: $rows +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] $data[0][12] $data[0][13] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] $data[1][7] $data[1][8] $data[1][9] $data[1][10] $data[1][11] $data[1][12] $data[1][13] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][10] $data[2][11] $data[2][12] $data[2][13] +print $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] $data[3][7] $data[3][8] $data[3][9] $data[3][10] $data[3][11] $data[3][12] $data[3][13] +print $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6] $data[4][7] $data[4][8] $data[4][9] $data[4][10] $data[4][11] $data[4][12] $data[4][13] +if $rows != $vgroups then + return -1 +endi + + +vg_offline_1: + +print ====> start $dnodeId +system sh/exec.sh -n $dnodeId -s start + +$switch_loop_cnt = $switch_loop_cnt + 1 +print $switch_loop_cnt + +if $switch_loop_cnt == 1 then + sql show vgroups + $dnodeId = $data[1][3] + $dnodeId = dnode . $dnodeId + goto switch_leader_to_offline_loop +elif $switch_loop_cnt == 2 then + sql show vgroups + $dnodeId = $data[2][3] + $dnodeId = dnode . $dnodeId + goto switch_leader_to_offline_loop +elif $switch_loop_cnt == 3 then + sql show vgroups + $dnodeId = $data[3][3] + $dnodeId = dnode . $dnodeId + goto switch_leader_to_offline_loop +elif $switch_loop_cnt == 4 then + sql show vgroups + $dnodeId = $data[4][3] + $dnodeId = dnode . $dnodeId + goto switch_leader_to_offline_loop +else + goto stop_leader_to_offline_loop +endi + +stop_leader_to_offline_loop: + +$loop_cnt = 0 +check_vg_ready1: +$loop_cnt = $loop_cnt + 1 +print $loop_cnt +sleep 202 +if $loop_cnt == 300 then + print ====> vgroups not ready! 
+ return -1 +endi + +sql show vgroups +print ===> rows: $rows +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] $data[0][12] $data[0][13] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] $data[1][7] $data[1][8] $data[1][9] $data[1][10] $data[1][11] $data[1][12] $data[1][13] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][10] $data[2][11] $data[2][12] $data[2][13] +print $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] $data[3][7] $data[3][8] $data[3][9] $data[3][10] $data[3][11] $data[3][12] $data[3][13] +print $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6] $data[4][7] $data[4][8] $data[4][9] $data[4][10] $data[4][11] $data[4][12] $data[4][13] +if $rows != $vgroups then + return -1 +endi + +if $data[0][4] == LEADER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][3] +elif $data[0][6] == LEADER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][5] +elif $data[0][8] == LEADER then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][7] +else + goto check_vg_ready1 +endi + +if $data[1][4] == LEADER then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][3] +elif $data[1][6] == LEADER then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][5] +elif $data[1][8] == LEADER then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][7] +else + goto check_vg_ready1 +endi + +if $data[2][4] == LEADER then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][3] +elif $data[2][6] == LEADER then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][5] +elif $data[2][8] == LEADER then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][7] +else + goto check_vg_ready1 +endi + +if $data[3][4] == LEADER then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][3] +elif $data[3][6] == LEADER then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][5] +elif $data[3][8] == LEADER then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][7] +else + goto check_vg_ready1 +endi + +if $data[4][4] == LEADER then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][3] +elif $data[4][6] == LEADER then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][5] +elif $data[4][8] == LEADER then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][7] +else + goto check_vg_ready1 +endi + + +print ====> final test: create stable/child table +sql create table stb1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int) + + +sql show stables +if $rows != 2 then + return -1 +endi + +$ctbPrefix = ctb1 +$ntbPrefix = ntb1 +$tbNum = 10 +$i = 0 +while $i < $tbNum + $ctb = $ctbPrefix . $i + sql create table $ctb using stb1 tags( $i ) + $ntb = $ntbPrefix . 
$i + sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10)) + $i = $i + 1 +endw + + +sql show stables +if $rows != 2 then + return -1 +endi + +sql show tables +if $rows != 40 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT + + + +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +system sh/exec.sh -n dnode4 -s start + + + +$loop_cnt = 0 +check_dnode_ready_2: + $loop_cnt = $loop_cnt + 1 + sleep 200 + if $loop_cnt == 10 then + print ====> dnode not ready! + return -1 + endi +sql show dnodes +print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] +print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] +if $data[0][0] != 1 then + return -1 +endi + +if $data[0][4] != ready then + goto check_dnode_ready_2 +endi +if $data[1][4] != ready then + goto check_dnode_ready_2 +endi +if $data[2][4] != ready then + goto check_dnode_ready_2 +endi +if $data[3][4] != ready then + goto check_dnode_ready_2 +endi + +sql use db1 +sql show stables +if $rows != 2 then + return -1 +endi + +sql show tables +if $rows != 40 then + return -1 +endi + + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT diff --git a/tests/script/tsim/sync/threeReplica1VgElectWihtInsert.sim b/tests/script/tsim/sync/threeReplica1VgElectWihtInsert.sim index f568008a820c880628af0128bb848297d63d5ffe..fc501096e687c0b7681bbf9e7fcad706f7aafced 100644 --- a/tests/script/tsim/sync/threeReplica1VgElectWihtInsert.sim +++ b/tests/script/tsim/sync/threeReplica1VgElectWihtInsert.sim @@ -155,28 +155,13 @@ while $i < $tbNum sql create table $ctb using stb tags( $i ) $ntb = $ntbPrefix . $i sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10)) - -# $x = 0 -# while $x < $rowNum -# $binary = ' . binary -# $binary = $binary . $i -# $binary = $binary . ' -# -# sql insert into $ctb values ($tstart , $i , $x , $binary ) -# sql insert into $ntb values ($tstart , 999 , 999 , 'binary-ntb' ) -# $tstart = $tstart + 1 -# $x = $x + 1 -# endw - -# print ====> insert rows: $rowNum into $ctb and $ntb - $i = $i + 1 -# $tstart = 1640966400000 endw $totalTblNum = $tbNum * 2 -print ====>totalTblNum:$totalTblNum +sleep 1000 sql show tables +print ====> expect $totalTblNum tables and get $rows in fact if $rows != $totalTblNum then return -1 endi @@ -222,6 +207,9 @@ endi $dnodeId = dnode . 
$dnodeId print ====> stop $dnodeId system sh/exec.sh -n $dnodeId -s stop -x SIGINT +sleep 1000 +print ====> start $dnodeId +system sh/exec.sh -n $dnodeId -s start $loop_cnt = 0 check_vg_ready_2: @@ -245,7 +233,7 @@ if $data[0][4] == LEADER then if $data[0][8] != FOLLOWER then goto check_vg_ready_2 endi - print ---- vgroup $data[0][0] leader switch to dnode $data[0][3] + print ---- vgroup $dnodeId leader switch to dnode $data[0][3] goto vg_ready_2 elif $data[0][6] == LEADER then if $data[0][4] != FOLLOWER then @@ -254,7 +242,7 @@ elif $data[0][6] == LEADER then if $data[0][8] != FOLLOWER then goto check_vg_ready_2 endi - print ---- vgroup $data[0][0] leader switch to dnode $data[0][5] + print ---- vgroup $dnodeId leader switch to dnode $data[0][5] goto vg_ready_2 elif $data[0][8] == LEADER then if $data[0][4] != FOLLOWER then @@ -263,7 +251,7 @@ elif $data[0][8] == LEADER then if $data[0][6] != FOLLOWER then goto check_vg_ready_2 endi - print ---- vgroup $data[0][0] leader switch to dnode $data[0][7] + print ---- vgroup $dnodeId leader switch to dnode $data[0][7] goto vg_ready_2 else goto check_vg_ready_2 @@ -272,8 +260,6 @@ vg_ready_2: $switch_loop_cnt = $switch_loop_cnt + 1 if $switch_loop_cnt < 3 then - print ====> start $dnodeId - system sh/exec.sh -n $dnodeId -s start goto switch_leader_loop endi diff --git a/tests/script/tsim/testsuit.sim b/tests/script/tsim/testsuit.sim index e32abe4b7ff8850f9818113bed5f006c2182392e..0b1f0df04e9db6af2547cc1da49873082b2682b3 100644 --- a/tests/script/tsim/testsuit.sim +++ b/tests/script/tsim/testsuit.sim @@ -77,3 +77,4 @@ run sma/tsmaCreateInsertData.sim run sma/rsmaCreateInsertQuery.sim run valgrind/checkError.sim run bnode/basic1.sim + diff --git a/tests/script/tsim/trans/create_db.sim b/tests/script/tsim/trans/create_db.sim index 0db5add88aeb6ea217cfe932ab3600398d3dd886..ae6b7eab160f788db5a1d7fa8f47ed4ffda6e8c8 100644 --- a/tests/script/tsim/trans/create_db.sim +++ b/tests/script/tsim/trans/create_db.sim @@ -64,7 +64,7 @@ if $rows != 1 then return -1 endi -if $data[0][0] != 2 then +if $data[0][0] != 7 then return -1 endi @@ -114,7 +114,7 @@ if $rows != 1 then return -1 endi -if $data[0][0] != 4 then +if $data[0][0] != 9 then return -1 endi @@ -137,7 +137,7 @@ endi sql_error create database d2 vgroups 2; print =============== kill transaction -sql kill transaction 4; +sql kill transaction 9; sleep 2000 sql show transactions diff --git a/tests/script/tsim/trans/lossdata1.sim b/tests/script/tsim/trans/lossdata1.sim new file mode 100644 index 0000000000000000000000000000000000000000..44785934e54e9fadbaa1b65bab7ef37808b18a69 --- /dev/null +++ b/tests/script/tsim/trans/lossdata1.sim @@ -0,0 +1,33 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c transPullupInterval -v 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print ======= backup sdbdata +system sh/exec.sh -n dnode1 -s stop +system cp ../../../../sim/dnode1/data/mnode/data/sdb.data ../../../../sim/dnode1/data/mnode/data/sdb.data.bak1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print =============== create user1 +sql create user user1 PASS 'user1' +sql create user user2 PASS 'user2' +sql show users +if $rows != 3 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop + +print ======= restore backup data +system cp ../../../../sim/dnode1/data/mnode/data/sdb.data.bak1 ../../../../sim/dnode1/data/mnode/data/sdb.data +system sh/exec.sh -n dnode1 -s start +sql connect + +sql show users +if $rows != 3 then + return -1 +endi + +system 
sh/exec.sh -n dnode1 -s stop \ No newline at end of file diff --git a/tests/script/tsim/valgrind/checkError.sim b/tests/script/tsim/valgrind/checkError.sim index 97d16dba9663a77fdf96fe1741d045765a306d42..5790437a671e61dedb90b3384de08b145f2a4cac 100644 --- a/tests/script/tsim/valgrind/checkError.sim +++ b/tests/script/tsim/valgrind/checkError.sim @@ -71,7 +71,7 @@ print ====> start to check if there are ERRORS in vagrind log file for each dnod # -n : dnode[x] be check system_content sh/checkValgrind.sh -n dnode1 print cmd return result----> [ $system_content ] -if $system_content <= 1 then +if $system_content <= 3 then return 0 endi diff --git a/tests/script/unique/account/account_create.sim b/tests/script/unique/account/account_create.sim deleted file mode 100644 index e36de29e7c5835ddc78a9f3eab4b2b4d34634c42..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/account_create.sim +++ /dev/null @@ -1,80 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print ============================ dnode1 start - -$i = 0 -$dbPrefix = acdb -$tbPrefix = actb -$db = $dbPrefix . $i -$tb = $tbPrefix . $i -$accountPrefix = acac - -print =============== step1-4 -sql show accounts -if $rows != 1 then - return -1 -endi -sql show users -if $rows != 3 then - return -1 -endi - -$i = 0 -$acc = $accountPrefix . $i -sql_error create account $acc PASS pass123 -sql create account $acc PASS 'pass123' -#sql create account $acc PASS 'pass123' -x step1 -# return -1 -#step1: -sql create user $acc PASS 'pass123' -x step2 - return -1 -step2: - -sql show accounts -if $rows != 2 then - return -1 -endi - -sql show users -if $rows != 3 then - return -1 -endi - -print =============== step5-6 -sql drop account $acc -sql drop account $acc -x step5 - return -1 -step5: -sql show accounts -if $rows != 1 then - return -1 -endi -sql show users -if $rows != 3 then - return -1 -endi - -print =============== step7 -sql create account $acc PASS 'pass123' -#sql create account $acc PASS 'pass123' -x step7 -# return -1 -#step7: - -sql show accounts -if $rows != 2 then - return -1 -endi - -sql drop account $acc -sql show accounts -if $rows != 1 then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/account/account_delete.sim b/tests/script/unique/account/account_delete.sim deleted file mode 100644 index d99a8b559dc6e04e4d6996e042d915671781d699..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/account_delete.sim +++ /dev/null @@ -1,99 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print ============= step1 -sql create account oroot pass 'taosdata' -sql close -sql connect oroot -sleep 2000 - -print ============= step2 -sql create user read pass 'taosdata' -sql create user write pass 'taosdata' - -sql create database d1 -sql create database d2 -sql create table d1.t1 (ts timestamp, i int) -sql create table d2.t2 (ts timestamp, i int) -sql insert into d1.t1 values(now, 1) -sql insert into d2.t2 values(now, 1) -sql insert into d2.t2 values(now+1s, 2) - -sql show databases -if $rows != 2 then - return -1 -endi -sql show users -if $rows != 4 then - return -1 -endi -sql select * from d1.t1 -if $rows != 1 then - return -1 -endi -sql select * from d2.t2 -if $rows != 
2 then - return -1 -endi - -print ============= step3 -sql close -sql connect -sleep 2000 - -sql show databases -if $rows != 0 then - return -1 -endi -sql show dnodes -print $data00 $data01 $data02 $data03 -if $data02 != 2 then - return -1 -endi -sql drop account oroot - -print ============= step4 -$x = 0 -show4: - $x = $x + 1 - sleep 2000 - if $x == 10 then - return -1 - endi - -sql show dnodes -if $data02 != 0 then - goto show4 -endi - -print ============= step5 -sql create account oroot pass 'taosdata' - -sql close -sql connect oroot -sleep 2000 - -sql show databases -if $rows != 0 then - return -1 -endi -sql show users -if $rows != 2 then - return -1 -endi - -sql close -sql connect -sleep 2000 -sql drop account oroot -sql show accounts -if $rows != 1 then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/account/account_len.sim b/tests/script/unique/account/account_len.sim deleted file mode 100644 index f8379bdf954bdde122e68585b973f4957ef15739..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/account_len.sim +++ /dev/null @@ -1,92 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -$i = 0 -$dbPrefix = aldb -$tbPrefix = altb -$db = $dbPrefix . $i -$tb = $tbPrefix . $i - -print =============== step1 -sql drop account ac -x step0 - return -1 -step0: - -sql create account PASS 123 -x step1 - return -1 -step1: - -sql show accounts -if $rows != 1 then - return -1 -endi - -print =============== step2 -sql drop account a -x step2 -step2: -sql create account a PASS '123' -sql show accounts -if $rows != 2 then - return -1 -endi - -sql drop account a -sql show accounts -if $rows != 1 then - return -1 -endi - -print =============== step3 -sql drop account abc01234567890123456789 -x step3 -step3: -sql create account abc01234567890123456789 PASS '123' -sql show accounts -if $rows != 2 then - return -1 -endi - -sql drop account abc01234567890123456789 -sql show accounts -if $rows != 1 then - return -1 -endi - -print =============== step4 -sql create account abcd01234567890123456789012345689012345 PASS '123' -x step4 - return -1 -step4: -sql show accounts -if $rows != 1 then - return -1 -endi - -print =============== step5 -sql drop account 123 -x step5 -step5: -sql create account 123 pass '123' -x step51 - return -1 -step51: - -sql create account a123 PASS '123' -sql show accounts -if $rows != 2 then - return -1 -endi - -sql drop account a123 -sql show accounts -if $rows != 1 then - return -1 -endi - -sql show users -if $rows != 3 then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/account/authority.sim b/tests/script/unique/account/authority.sim deleted file mode 100644 index 8f2408de1429a8ea34add79e335f6bf7f42ca2b0..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/authority.sim +++ /dev/null @@ -1,346 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print ============= step1 - -sql create user read pass 'taosdata' -sql create user write pass 'taosdata' -sql create user manage pass 'taosdata' - -sql create user a PASS 'ade' privilege -x step11 - return -1 -step11: - -sql create user a PASS 'ade' privilege a -x step12 - return -1 
-step12: - -sql create user a PASS 'ade' privilege read -x step13 - return -1 -step13: - -sql show accounts -if $rows != 1 then - return -1 -endi -sql show users -if $rows != 6 then - return -1 -endi - -sql alter user read privilege read -sql alter user write privilege write -sql_error alter user manage privilege super - -print ============= step2 -sql close -sql connect write -sleep 2000 - -sql create database d1 -sql create database d2 -sql create table d1.t1 (ts timestamp, i int) -sql create table d2.t2 (ts timestamp, i int) -sql insert into d1.t1 values(now, 1) -sql insert into d2.t2 values(now, 1) -sql insert into d2.t2 values(now+1s, 2) - -sql show accounts -if $rows != 1 then - return -1 -endi -sql show users -if $rows != 6 then - return -1 -endi -sql show databases -if $rows != 2 then - return -1 -endi -sql select * from d1.t1 -if $rows != 1 then - return -1 -endi -sql select * from d2.t2 -if $rows != 2 then - return -1 -endi - -sql create account t1 pass 'taosdata' -x step21 - return -1 -step21: - -sql create user t1 pass 'taosdata' -x step22 - return -1 -step22: - -sql alter user read pass 'taosdata' -x step23 - return -1 -step23: - -sql create dnode $hostname2 -x step24 - return -1 -step24: - -sql drop dnode $hostname2 -x step25 - return -1 -step25: - -sql create mnode 192.168.0.2 -x step26 - return -1 -step26: - -sql drop mnode 192.168.0.2 -x step27 - return -1 -step27: - -sql drop account root -x step28 - return -1 -step28: - -sql alter user write pass 'taosdata' - -print ============= step3 -sql close -sql connect read -sleep 2000 - -sql create database d3 -x step31 - return -1 -step31: - -sql create table d1.t3 (ts timestamp, i int) -x step32 - return -1 -step32: - -#sql insert into d1.t1 values(now, 2) -x step33 -# return -1 -#step33: - -sql show accounts -if $rows != 1 then - return -1 -endi -sql show users -if $rows != 6 then - return -1 -endi -sql show databases -if $rows != 2 then - return -1 -endi -sql select * from d1.t1 -if $rows != 1 then - return -1 -endi - -sql select * from d2.t2 -if $rows != 2 then - return -1 -endi - -sql sql create account t1 pass 'taosdata' -x step34 - return -1 -step34: - -sql sql create user t1 pass 'taosdata' -x step35 - return -1 -step35: - -print ============= step4 -sql close -sql connect manage -sleep 2000 - -sql create database d3 -sql create database d4 -sql create table d3.t3 (ts timestamp, i int) -sql create table d4.t4 (ts timestamp, i int) - -sql show accounts -if $rows != 1 then - return -1 -endi -sql show users -if $rows != 6 then - return -1 -endi -sql show databases -if $rows != 4 then - return -1 -endi -sql select * from d1.t1 -if $rows != 1 then - return -1 -endi -sql select * from d2.t2 -if $rows != 2 then - return -1 -endi - -sql create account other pass 'taosdata' -x step41 - return -1 -step41: - -sql close -sql connect -sleep 2000 -sql create account other pass 'taosdata' - -print ============= step5 -sql close -sql connect other -sleep 2000 -sql create user read pass 'taosdata' -x step51 - return -1 -step51: -sql create other write pass 'taosdata' -x step52 - return -1 -step52: - -sql create user oread pass 'taosdata' -sql create user owrite pass 'taosdata' -sql create user omanage pass 'taosdata' - -sql show users -print show users $rows -if $rows != 5 then - return -1 -endi - -sql alter user oread privilege read -sql alter user owrite privilege write -sql alter user oroot privilege super -x step53 - return -1 -step53: -sql alter user read privilege read -x step54 - return -1 -step54: - -print ============= step6 
-sql close -sql connect owrite -sleep 2000 -sql reset query cache -sleep 1000 -sql create database d1 -sql create database d3 -sql create table d1.t1 (ts timestamp, i int) -sql create table d3.t3 (ts timestamp, i int) -sql insert into d1.t1 values(now, 11) -sql insert into d3.t3 values(now, 11) -sql insert into d3.t3 values(now+1s, 12) - -sql show databases -if $rows != 2 then - return -1 -endi -sql select * from d1.t1 -if $rows != 1 then - return -1 -endi -sql select * from d2.t2 -x step6 - return -1 -step6: -sql select * from d3.t3 -if $rows != 2 then - return -1 -endi - -sql sql create account t1 pass 'taosdata' -x step61 - return -1 -step61: - -sql sql create user t1 pass 'taosdata' -x step62 - return -1 -step62: - -print ============= step7 -sql close -sql connect oread -sleep 2000 - -sql create database d7 -x step71 - return -1 -step71: - -sql show databases -if $rows != 2 then - return -1 -endi -sql select * from d1.t1 -if $rows != 1 then - return -1 -endi -sql select * from d2.t2 -x step72 - return -1 -step72: -sql select * from d3.t3 -if $rows != 2 then - return -1 -endi - -sql sql create account t1 pass 'taosdata' -x step73 - return -1 -step73: - -sql sql create user t1 pass 'taosdata' -x step74 - return -1 -step74: - -print ============= step8 -sql close -sql connect omanage -sleep 2000 - -sql create account t1 pass 'taosdata' -x step81 - return -1 -step81: - -sql create database d4 -sql create table d4.t4 (ts timestamp, i int) - -sql show databases -if $rows != 3 then - return -1 -endi -sql select * from d1.t1 -if $rows != 1 then - return -1 -endi -sql select * from d2.t2 -x step82 - return -1 -step82: -sql select * from d3.t3 -if $rows != 2 then - return -1 -endi - -print ============= step9 -sql close -sql connect -sleep 2000 -sql show databases -if $rows != 4 then - return -1 -endi - -sql drop account other -sql drop user read -sql drop user manage -sql drop user write - -sql close -sql connect -sleep 2000 -sql drop database d1 -sql drop database d2 -sql drop database d3 -sql drop database d4 - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/account/basic.sim b/tests/script/unique/account/basic.sim deleted file mode 100644 index 00e706a4482d9fa57ed2f97a9995ce84d3667fa1..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/basic.sim +++ /dev/null @@ -1,46 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/exec.sh -n dnode1 -s start -sleep 2000 -sql connect - -print =============== show accounts -sql show accounts -if $rows != 1 then - return -1 -endi - -print $data00 $data01 $data02 - -print =============== create account1 -sql create account account1 PASS 'account1' -sql show accounts -if $rows != 2 then - return -1 -endi - -print $data00 $data01 $data02 -print $data10 $data11 $data22 - -print =============== create account2 -sql create account account2 PASS 'account2' -sql show accounts -if $rows != 3 then - return -1 -endi - -print $data00 $data01 $data02 -print $data10 $data11 $data22 -print $data20 $data11 $data22 - -print =============== drop account1 -sql drop account account1 -sql show accounts -if $rows != 2 then - return -1 -endi - -print $data00 $data01 $data02 -print $data10 $data11 $data22 - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/account/paras.sim b/tests/script/unique/account/paras.sim deleted file mode 100644 index 
102f5b6a381e5100b35a4f0125b1318bcb8b1d76..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/paras.sim +++ /dev/null @@ -1,114 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/exec.sh -n dnode1 -s start -sleep 2000 -sql connect - -print =============== show accounts -sql show accounts -if $rows != 1 then - return -1 -endi - -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != root then - return -1 -endi -if $data02 != 3/128 then - return -1 -endi -if $data03 != 0/128 then - return -1 -endi -if $data04 != 0/2147483647 then - return -1 -endi -if $data05 != 0/1000 then - return -1 -endi -if $data06 != 0.000/unlimited then - return -1 -endi - -print =============== create account -sql create account hou pass "hou" tseries 80000 storage 10737418240 streams 10 qtime 3600 dbs 3 users 3 conns 10 -sql show accounts -if $rows != 2 then - return -1 -endi - -print $data10 $data11 $data12 $data13 $data14 $data15 $data16 -if $data10 != hou then - return -1 -endi -if $data12 != 2/3 then - return -1 -endi -if $data13 != 0/3 then - return -1 -endi -if $data14 != 0/80000 then - return -1 -endi -if $data15 != 0/10 then - return -1 -endi -if $data16 != 0.000/10.000 then - return -1 -endi - -print =============== alter account -sql alter account hou pass "hou" tseries 8000 streams 10 dbs 5 users 5 -sql show accounts -if $rows != 2 then - return -1 -endi - -print $data10 $data11 $data12 $data13 $data14 $data15 $data16 -if $data10 != hou then - return -1 -endi -if $data12 != 2/5 then - return -1 -endi -if $data13 != 0/5 then - return -1 -endi -if $data14 != 0/8000 then - return -1 -endi -if $data15 != 0/10 then - return -1 -endi -if $data16 != 0.000/10.000 then - return -1 -endi - -print =============== alter account -sql create account hou pass "hou" tseries 8000 streams 10 dbs 5 users 6 -sql show accounts -if $rows != 2 then - return -1 -endi - -print $data10 $data11 $data12 $data13 $data14 $data15 $data16 -if $data10 != hou then - return -1 -endi -if $data12 != 2/6 then - return -1 -endi -if $data13 != 0/5 then - return -1 -endi -if $data14 != 0/8000 then - return -1 -endi -if $data15 != 0/10 then - return -1 -endi -if $data16 != 0.000/10.000 then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/account/pass_alter.sim b/tests/script/unique/account/pass_alter.sim deleted file mode 100644 index 8b857b014a292d53536c5acf2a00daa15be11239..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/pass_alter.sim +++ /dev/null @@ -1,116 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print ============= step1 -sql create user read pass 'taosdata1' -sql create user write pass 'taosdata1' - -sql alter user read pass 'taosdata' -sql alter user write pass 'taosdata' - -sql show accounts -if $rows != 1 then - return -1 -endi -sql show users -if $rows != 5 then - return -1 -endi - -print ============= step2 -sql close -sql connect read -sleep 2000 -sql alter user read pass 'taosdata' -sql alter user write pass 'taosdata1' -x step2 - return -1 -step2: - - -print ============= step3 -sql close -sql connect write -sleep 2000 -sql alter user write pass 'taosdata' -sql alter user read pass 'taosdata' -x step3 - return -1 -step3: - -print ============= step4 -sql close -sleep 1000 -sql connect -sleep 2000 -sql create 
account oroot pass 'taosdata' -sql show accounts -if $rows != 2 then - return -1 -endi -sql show users -if $rows != 5 then - return -1 -endi - -print ============= step5 -sql close -sql connect oroot -sleep 2000 - -sql create user oread pass 'taosdata1' -sql create user owrite pass 'taosdata1' -sql alter user oread pass 'taosdata' -sql alter user owrite pass 'taosdata' - -sql create user read pass 'taosdata1' -x step51 - return -1 -step51: -sql alter user read pass 'taosdata1' -x step52 - return -1 -step52: - -sql show accounts -x step53 - return -1 -step53: -sql show users -print show users $rows -if $rows != 4 then - return -1 -endi - -print ============= step6 -sql close -sql connect oread -sleep 2000 -sql alter user oread pass 'taosdata' -sql alter user owrite pass 'taosdata1' -x step6 - return -1 -step6: - - -print ============= step7 -sql close -sql connect owrite -sleep 2000 -sql alter user owrite pass 'taosdata' -sql alter user oread pass 'taosdata' -x step7 - return -1 -step7: - -print ============= step8 -sql close -sql connect -sleep 2000 -sql alter user oread pass 'taosdata' -sql alter user owrite pass 'taosdata' -sql alter user oroot pass 'taosdata' - -sql drop account oroot -sql drop user read -sql drop user write - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/account/pass_len.sim b/tests/script/unique/account/pass_len.sim deleted file mode 100644 index f4ceb76f7b8b41873217bd11ae2c3d385386b0e9..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/pass_len.sim +++ /dev/null @@ -1,81 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -$i = 0 -$dbPrefix = apdb -$tbPrefix = aptb -$db = $dbPrefix . $i -$tb = $tbPrefix . $i -$userPrefix = apusr - -print =============== step1 -$i = 0 -$user = $userPrefix . $i - -sql drop user $user -x step11 - return -1 -step11: - -sql create user $user PASS -x step12 - return -1 -step12: - -sql create user $user PASS 'taosdata' - -sql show users -if $rows != 4 then - return -1 -endi - -print =============== step2 -$i = 1 -$user = $userPrefix . $i -sql drop user $user -x step2 -step2: -sql create user $user PASS '1' -sql show users -if $rows != 5 then - return -1 -endi - -print =============== step3 -$i = 2 -$user = $userPrefix . $i -sql drop user $user -x step3 -step3: - -sql create user $user PASS 'abc0123456789' -sql show users -if $rows != 6 then - return -1 -endi - -print =============== step4 -$i = 3 -$user = $userPrefix . $i -sql create user $user PASS 'abcd012345678901234567891234567890' -x step4 - return -1 -step4: -sql show users -if $rows != 6 then - return -1 -endi - -$i = 0 -while $i < 3 - $user = $userPrefix . 
$i - sql drop user $user - $i = $i + 1 -endw - -sql show users -if $rows != 3 then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/account/testSuite.sim b/tests/script/unique/account/testSuite.sim deleted file mode 100644 index 9d4141cfe0c086f9a8863fffb00a9cb0f410e265..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/testSuite.sim +++ /dev/null @@ -1,11 +0,0 @@ -run unique/account/account_create.sim -run unique/account/account_delete.sim -run unique/account/account_len.sim -run unique/account/authority.sim -run unique/account/basic.sim -run unique/account/paras.sim -run unique/account/pass_alter.sim -run unique/account/pass_len.sim -run unique/account/usage.sim -run unique/account/user_create.sim -run unique/account/user_len.sim diff --git a/tests/script/unique/account/usage.sim b/tests/script/unique/account/usage.sim deleted file mode 100644 index 3b9c20b159a6237f469fc1e48b5b3a3f4ca5f7b8..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/usage.sim +++ /dev/null @@ -1,154 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/exec.sh -n dnode1 -s start -#system sh/exec.sh -n monitor -s 1 -system sh/exec.sh -n monitorInterval -s 1 -sleep 2000 -sql connect - -print =============== show accounts - -print =============== create account -sql alter account root pass "taosdata" tseries 8000 streams 10 dbs 5 users 5 -sql show accounts -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != root then - return -1 -endi -if $data02 != 3/5 then - return -1 -endi -if $data03 != 0/5 then - return -1 -endi -if $data04 != 0/8000 then - return -1 -endi -if $data05 != 0/10 then - return -1 -endi -if $data06 != 0.000/unlimited then - return -1 -endi - -print =============== check usage account -sql create database d1 wal 2 -sql create database d2 wal 2 -sql create database d3 wal 2 -sql create database d4 wal 2 -sql create database d5 wal 2 - -sql create table d1.t1 (ts timestamp, i int); -sql create user u1 pass "u1" - -sql show accounts -print $data10 $data11 $data12 $data13 $data14 $data15 $data16 -if $data00 != root then - return -1 -endi -if $data02 != 4/5 then - return -1 -endi -if $data03 != 5/5 then - return -1 -endi -if $data04 != 1/8000 then - return -1 -endi -if $data05 != 0/10 then - return -1 -endi -if $data06 != 0.000/unlimited then - return -1 -endi - -print =============== step2 -sql alter account root pass "taosdata" tseries 10 storage 1073741824 streams 10 dbs 5 users 5 -sql show accounts -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != root then - return -1 -endi -if $data02 != 4/5 then - return -1 -endi -if $data03 != 5/5 then - return -1 -endi -if $data04 != 1/10 then - return -1 -endi -if $data05 != 0/10 then - return -1 -endi -if $data06 != 0.000/1.000 then - return -1 -endi - -print =============== step3 -sql alter account root pass "taosdata" tseries 10 storage 16 streams 10 dbs 5 users 5 -sql show accounts -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != root then - return -1 -endi -if $data02 != 4/5 then - return -1 -endi -if $data03 != 5/5 then - return -1 -endi -if $data04 != 1/10 then - return -1 -endi -if $data05 != 0/10 then - return -1 -endi -if $data06 != 0.000/0.000 then - return -1 -endi - -print =============== step4 -sql insert into d1.t1 values(now + 1s, 1) -sql insert into d1.t1 values(now + 2s, 2) - -sleep 10000 -print no write auth 
-sql_error insert into d1.t1 values(now + 3s, 2) -sql_error insert into d1.t1 values(now + 4s, 2) - -sql alter account root pass "taosdata" tseries 10 storage 36 streams 10 dbs 5 users 5 -sleep 10000 -print has write auth -sql insert into d1.t1 values(now + 5s, 1) -sql insert into d1.t1 values(now + 6s, 2) - -# no write auth -sleep 10000 -print no write auth -sql_error insert into d1.t1 values(now + 7s, 2) -sql_error insert into d1.t1 values(now + 8s, 2) - -print =============== step5 -sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state all -sleep 10000 - -sql insert into d1.t1 values(now + 11s, 1) -sql insert into d1.t1 values(now + 12s, 2) - -sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state no -sleep 10000 -print no write auth -sql_error insert into d1.t1 values(now + 13s, 2) -sql_error insert into d1.t1 values(now + 14s, 2) - -sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state all -sleep 10000 -print has write auth -sql insert into d1.t1 values(now + 15s, 1) -sql insert into d1.t1 values(now + 16s, 2) - -print =============== check grant -sql_error create database d6 - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/account/user_create.sim b/tests/script/unique/account/user_create.sim deleted file mode 100644 index e54a380f0dbef8107de452354ea01bc58262d548..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/user_create.sim +++ /dev/null @@ -1,84 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print =============== step1 -sql show users -if $rows != 3 then - return -1 -endi - -sql create user read PASS 'pass123' -sql create user read PASS 'pass123' -x step1 - return -1 -step1: - -sql show users -if $rows != 4 then - return -1 -endi - -sql alter user read PASS 'taosdata' - -print =============== step2 -sql close -sql connect read -sleep 2000 - -sql alter user read PASS 'taosdata' - -print =============== step3 -sql drop user read -x step31 - return -1 -step31: -sql drop user _root -x step32 - return -1 -step32: -sql drop user monitor -x step33 - return -1 -step33: - -print =============== step4 -sql close -sql connect -sleep 2000 - -sql alter user read privilege read -sql show users -print $data1_read -if $data1_read != readable then - return -1 -endi - -sql_error alter user read privilege super -sql show users -print $data1_read -if $data1_read != readable then - return -1 -endi - -sql alter user read privilege write -sql show users -if $data1_read != writable then - return -1 -endi - -sql alter user read privilege 1 -x step43 - return -1 -step43: - -sql drop user _root -x step41 - return -1 -step41: - -sql drop user monitor -x step42 - return -1 -step42: - -sql drop user read - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/account/user_len.sim b/tests/script/unique/account/user_len.sim deleted file mode 100644 index b8d448f0ffc9e43cbc0f0a5a849bda215e72e790..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/user_len.sim +++ /dev/null @@ -1,94 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -$i = 0 -$dbPrefix = lm_us_db -$tbPrefix 
= lm_us_tb -$db = $dbPrefix . $i -$tb = $tbPrefix . $i - -print =============== step1 -sql drop user ac -x step0 - return -1 -step0: - -sql create user PASS '123' -x step1 - return -1 -step1: - -sql show users -if $rows != 3 then - return -1 -endi - -print =============== step2 -sql drop user a -x step2 -step2: -sleep 1000 -sql create user a PASS '123' -sql show users -if $rows != 4 then - return -1 -endi - -sql drop user a -sql show users -if $rows != 3 then - return -1 -endi - -print =============== step3 -sql drop user abc01234567890123456789 -x step3 -step3: - -sql create user abc01234567890123456789 PASS '123' -sql show users -if $rows != 4 then - return -1 -endi - -sql drop user abc01234567890123456789 -sql show users -if $rows != 3 then - return -1 -endi - -print =============== step4 -sql create user abcd0123456789012345678901234567890111 PASS '123' -x step4 - return -1 -step4: -sql show users -if $rows != 3 then - return -1 -endi - -print =============== step5 -sql drop user 123 -x step5 -step5: -sql create user 123 PASS '123' -x step61 - return -1 -step61: - -sql create user a123 PASS '123' -sql show users -if $rows != 4 then - return -1 -endi - -sql drop user a123 -sql show users -if $rows != 3 then - return -1 -endi - -sql show accounts -if $rows != 1 then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/http/admin.sim b/tests/script/unique/http/admin.sim deleted file mode 100644 index ae206744c4e93ab7cebd5f4db7d8d4b84ad5ebbb..0000000000000000000000000000000000000000 --- a/tests/script/unique/http/admin.sim +++ /dev/null @@ -1,192 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c http -v 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 -#system sh/cfg.sh -n dnode1 -c adminRowLimit -v 10 -system sh/cfg.sh -n dnode1 -c httpDebugFlag -v 135 -system sh/exec.sh -n dnode1 -s start - -sql connect -sleep 2000 - -print ============================ dnode1 start - -print =============== step0 - prepare data -sql create database d1 -sql use d1 - -sql create table table_admin (ts timestamp, i int) - -sql insert into table_admin values('2017-12-25 21:28:41.022', 1) -sql insert into table_admin values('2017-12-25 21:28:42.022', 2) -sql insert into table_admin values('2017-12-25 21:28:43.022', 3) -sql insert into table_admin values('2017-12-25 21:28:44.022', 4) -sql insert into table_admin values('2017-12-25 21:28:45.022', 5) -sql insert into table_admin values('2017-12-25 21:28:46.022', 6) -sql insert into table_admin values('2017-12-25 21:28:47.022', 7) -sql insert into table_admin values('2017-12-25 21:28:48.022', 8) -sql insert into table_admin values('2017-12-25 21:28:49.022', 9) -sql insert into table_admin values('2017-12-25 21:28:50.022', 10) - -print =============== step1 - login - -system_content curl 127.0.0.1:7111/admin/ -print 1-> $system_content -if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then - print actual: $system_content - return -1 -endi - -system_content curl 127.0.0.1:7111/admin/xx -print 2-> $system_content -if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then - return -1 -endi - -system_content curl 127.0.0.1:7111/admin/login -print 3-> $system_content -if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then - return -1 -endi - -system_content curl 127.0.0.1:7111/admin/login/root -print 4-> $system_content -if $system_content != 
@{"status":"error","code":4357,"desc":"no auth info input"}@ then - return -1 -endi - -system_content curl 127.0.0.1:7111/admin/login/root/123 -print 5-> $system_content -if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then - return -1 -endi - -system_content curl 127.0.0.1:7111/admin/login/root/123/1/1/3 -print 6-> $system_content -if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then - return -1 -endi - -system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.' -d 'show databases' 127.0.0.1:7111/admin/login/root/1 -print 7-> $system_content -if $system_content != @{"status":"error","code":4387,"desc":"invalid format of Authorization"}@ then - return -1 -endi - -system_content curl -H 'Authorization: Taosd eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' 127.0.0.1:7111/admin/login/root/1 -print 8-> $system_content -if $system_content != @{"status":"error","code":4387,"desc":"invalid format of Authorization"}@ then - return -1 -endi - -sleep 2000 -system_content curl 127.0.0.1:7111/admin/login/root/taosdata -print 9 -----> $system_content - -if $system_content != {"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"} then - return -1 -endi - -#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/admin/login/root/1 -#print 10-> $system_content -#if $system_content != @{"status":"error","code":29,"desc":"failed to connect to server"}@ then -# return -1 -#endi - -print =============== step2 - logout - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/logout -print 10 -----> $system_content - -if $system_content != @{"status":"succ","code":0,"desc":"logout success"}@ then - return -1 -endi - -system_content curl 127.0.0.1:7111/admin/logout -print 11 -----> $system_content - -if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then - return -1 -endi - -print =============== step3 - info - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/info -print curl 127.0.0.1:7111/admin/info -----> $system_content -if $system_content != {"status":"succ","data":[{"dbs":1,"tables":1,"users":3,"mnodes":1,"dnodes":1}]} then - return -1 -endi - -print =============== step4 - meta - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show mnodes' 127.0.0.1:7111/admin/meta -print curl 127.0.0.1:7111/admin/meta -----> $system_content -#if $system_content != @{"status":"succ","head":["column type","column name","column bytes"],"data":[["binary","IP",16],["timestamp","created time",8],["binary","status",10],["binary","role",10],["binary","public ip",16]],"rows":5}@ then -# return -1 -#endi - -print =============== step5 - query data - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql -print curl 127.0.0.1:7111/admin/all -----> $system_content -if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 
21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10}@ then - return -1 -endi - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql -print curl 127.0.0.1:7111/admin/sql -----> $system_content -if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10}@ then - return -1 -endi - -print =============== step6 - insert data -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.table_admin values('2017-12-25 21:28:51.022', 11)" 127.0.0.1:7111/admin/sql -print curl 127.0.0.1:7111/admin/sql -----> $system_content -if $system_content != @{"status":"succ","head":["affect_rows"],"data":[[1]],"rows":1}@ then - return -1 -endi - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/all -print curl 127.0.0.1:7111/admin/all -----> $system_content -if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then - print actual: $system_content - print expect =======> {"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11} - return -1 -endi - -#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql -#print curl 127.0.0.1:7111/admin/sql -----> $system_content -#if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:51.022",11],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:42.022",2]],"rows":10}@ then -# return -1 -#endi - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/info -print curl 127.0.0.1:7111/admin/info -----> $system_content -if $system_content != {"status":"succ","data":[{"dbs":1,"tables":1,"users":3,"mnodes":1,"dnodes":1}]} then - return -1 -endi - -print =============== step7 - use dbs - -system_content curl -H 'Authorization: Taosd 
/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'use d1;' 127.0.0.1:7111/admin/all -print 23-> $system_content -if $system_content != @{"status":"error","code":4360,"desc":"no need to execute use db cmd"}@ then - return -1 -endi - -print =============== step8 - monitor dbs -#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show dnodes;show mnodes;' 127.0.0.1:7111/admin/sqls -#print 24-> $system_content -#if $system_content != @[{"status":"succ","head":["IP","created time","open vnodes","free vnodes","status","balance state"],"data":[["127.0.0.1","2018-09-04 #11:16:13.985",1,3,"ready","balanced"]],"rows":1},{"status":"succ","head":["IP","created time","status","role"],"data":[["127.0.0.1","2018-09-04 11:16:13.371","serving","master"]],"rows":1}]@ then -# return -1 -# endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/http/opentsdb.sim b/tests/script/unique/http/opentsdb.sim deleted file mode 100644 index 7d1e6b03d4547a6b0b2a6a7857000a8a6518a002..0000000000000000000000000000000000000000 --- a/tests/script/unique/http/opentsdb.sim +++ /dev/null @@ -1,247 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c http -v 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print ============================ dnode1 start - -print =============== step1 - parse -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/ -print $system_content -if $system_content != @{"status":"error","code":4496,"desc":"database name can not be null"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db123456789012345678901234567890db -print $system_content -if $system_content != @{"status":"error","code":4497,"desc":"database name too long"}@ then - return -1 -endi - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/ -print $system_content -if $system_content != @{"status":"error","code":4496,"desc":"database name can not be null"}@ then - return -1 -endi - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put2 -print $system_content -if $system_content != @{"status":"error","code":4354,"desc":"invalid url format"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4499,"desc":"metrics size is 0"}@ then - return -1 -endi - -system_content curl 
-u root:taosdata -d '[' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4498,"desc":"invalid opentsdb json fromat"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '{}' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4499,"desc":"metrics size is 0"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content - -if $system_content != @{"status":"error","code":4501,"desc":"metric name not find"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": 1,"timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4502,"desc":"metric name type should be string"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4503,"desc":"metric name length is 0"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "ab1234567890123456789012345678ab1234567890123456789012345678","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"errors":[{"datapoint":{"metric":"ab1234567890123456789012345678ab1234567890123456789012345678","stable":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb","table":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb_lga_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"status":"error","code":1547,"desc":"Timestamp data out of range"}}],"failed":1,"success":0,"affected_rows":0}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4505,"desc":"timestamp not find"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": "2","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4506,"desc":"timestamp type should be integer"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": -1,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4507,"desc":"timestamp value smaller than 0"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4517,"desc":"value not find"}@ then - return -1 -endi - -####### - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4508,"desc":"tags not find"}@ then - return -1 -endi - 
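The probes above walk the OpenTSDB-compatible write endpoint through its validation ladder one field at a time: malformed JSON, an empty batch, a missing, mistyped, or empty metric name, then a missing, mistyped, or negative timestamp, and finally a missing value; the probes that follow do the same for the tags object. For contrast, a minimal well-formed datapoint — essentially the payload that step2 of this same script inserts successfully — looks like the sketch below. This is a sketch only: it assumes the local REST service on port 7111 and the root:taosdata account used throughout these scripts, and the single-tag payload is illustrative rather than quoted from the file.

    # sketch: a datapoint that clears every validation probe above
    # (millisecond timestamp, non-empty string metric, one short tag)
    system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01"}}]' 127.0.0.1:7111/opentsdb/db/put
    print $system_content
    # a successful write reports "failed":0,"success":1,"affected_rows":1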
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4509,"desc":"tags size is 0"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": 0}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4509,"desc":"tags size is 0"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","group1": "1","group1": "1","group1": "1","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbbbbbb","table":"sys_cpu_d_bbbbbbb_lga_1_1_1_1_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","group1":"1","group1":"1","group1":"1","group1":"1","host":"web01"},"status":"error","code":866,"desc":"failed to create table"}}],"failed":1,"success":0,"affected_rows":0}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"": "web01"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4512,"desc":"tag name is null"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host01123456789001123456789001123456789001123456789001123456789001123456789": "01"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4513,"desc":"tag name length too long"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web011234567890011234567890011234567890011234567890011234567890011234567890011234567890011234567890"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4516,"desc":"tag value can not more than 64"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": ""}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4515,"desc":"tag value is null"}@ then - return -1 -endi - -sleep 2000 - -print =============== step2 - insert single data -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846400000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":1,"affected_rows":1}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != 
@{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846400000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":1,"affected_rows":1}@ then - return -1 -endi - -system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/ -print $system_content -if $system_content != @{"status":"succ","head":["ts","value"],"column_meta":[["ts",9,8],["value",7,8]],"data":[["2012-09-05 20:00:00.000",18.000000000]],"rows":1}@ then - return -1 -endi - -print =============== step3 - multi-query data -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846405000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846402000,"value": 18,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put - -print $system_content - -if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846405000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}},{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web02","timestamp":1346846402000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web02"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":2,"affected_rows":2}@ then - return -1 -endi - -system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/ - -print $system_content - -if $system_content != @{"status":"succ","head":["ts","value"],"column_meta":[["ts",9,8],["value",7,8]],"data":[["2012-09-05 20:00:00.000",18.000000000],["2012-09-05 20:00:05.000",18.000000000]],"rows":2}@ then - return -1 -endi - -system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:7111/rest/sql/ - -print $system_content - -if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[3]],"rows":1}@ then - return -1 -endi - -print =============== step4 - summary-put data -system_content curl -u root:taosdata -d '[{"metric": "sys_mem","timestamp": 1346846400000,"value": 8,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_mem","timestamp": 1346846405000,"value": 9,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put?details=false - -print $system_content - -if $system_content != @{"failed":0,"success":2}@ then - return -1 -endi - -system_content curl -u root:taosdata -d 'select * from db.sys_mem_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/ - -print $system_content - -if $system_content != @{"status":"succ","head":["ts","value"],"column_meta":[["ts",9,8],["value",7,8]],"data":[["2012-09-05 20:00:00.000",8.000000000],["2012-09-05 20:00:05.000",9.000000000]],"rows":2}@ then - return -1 -endi - -system_content curl -u root:taosdata -d 'select count(*) from db.sys_mem_d_bbb' 127.0.0.1:7111/rest/sql/ - -print $system_content - -if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[2]],"rows":1}@ then - return -1 -endi - -print =============== step5 - prepare data - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846402000,"value": 19,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 
1346846402,"value": 19,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846403000,"value": 20,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846403,"value": 20,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846404000,"value": 21,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846404,"value": 21,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846405000,"value": 22,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846405,"value": 22,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846406000,"value": 23,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846406,"value": 23,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put - -system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:7111/rest/sql/ -print $system_content -if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[7]],"rows":1}@ then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT diff --git a/tests/script/unique/http/testSuite.sim b/tests/script/unique/http/testSuite.sim deleted file mode 100644 index 3a9753e744b84bfea28e40e8b3554cb82d2ebb40..0000000000000000000000000000000000000000 --- a/tests/script/unique/http/testSuite.sim +++ /dev/null @@ -1,2 +0,0 @@ -run unique/http/admin.sim -run general/http/opentsdb.sim \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt20.sim b/tests/script/unique/mnode/mgmt20.sim deleted file mode 100644 index 8945cffab226ab5dc379057d55e562f5c3ed9cfa..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt20.sim +++ /dev/null @@ -1,88 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 - -system sh/cfg.sh -n dnode1 -c monitor -v 1 -system sh/cfg.sh -n dnode2 -c monitor -v 1 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -system sh/exec.sh -n dnode2 -s start -sql connect - -print ============== step2 -sql create dnode $hostname2 - -$x = 0 -show2: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show2 -endi -if $data2_2 != slave then - goto show2 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT - -print ============== step3 -system sh/exec.sh -n dnode2 -s start -sleep 10000 - -system sh/exec.sh -n dnode1 -s start -sql connect - -print 
=============== step4 -sql select * from log.dn1 -$d1_first = $rows -sql select * from log.dn2 -$d2_first = $rows - -$x = 0 -show4: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show4 -endi -if $data2_2 != slave then - goto show4 -endi - -sleep 2000 -sql select * from log.dn1 -$d1_second = $rows -sql select * from log.dn2 -$d2_second = $rows - -print dnode1 $d1_first $d1_second -print dnode2 $d2_first $d2_second -if $d1_first >= $d1_second then - return -1 -endi - -if $d2_first >= $d2_second then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt21.sim b/tests/script/unique/mnode/mgmt21.sim deleted file mode 100644 index 8409383309dbde5500b9719cd64fd74ca5e384b2..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt21.sim +++ /dev/null @@ -1,44 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 - -print ============== step1 -system sh/exec.sh -n dnode2 -s start -sleep 10000 - -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - return -1 -endi - -print ============== step2 -sql create dnode $hostname2 - -$x = 0 -show2: - $x = $x + 1 - sleep 2000 - if $x == 5 then - return -1 - endi - -sql show mnodes -x show2 -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show2 -endi -if $data2_2 != slave then - goto show2 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt22.sim b/tests/script/unique/mnode/mgmt22.sim deleted file mode 100644 index 399805312ba905d55bceffe011cfe074c831684e..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt22.sim +++ /dev/null @@ -1,114 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 - -$x = 0 -show2: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show2 -endi -if $data2_2 != slave then - goto show2 -endi - -print ============== step3 -sql_error drop dnode $hostname1 -x error1 -print should not drop master - -print ============== step4 -system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 -sql_error show mnodes -print error of no master - -print ============== step5 -sql_error drop dnode $hostname1 -print error of no master - -print ============== step6 -system sh/exec.sh -n dnode1 -s start -sleep 2000 -sql close -sql connect - -$x = 0 -show6: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - 
endi - -sql show mnodes -x show6 -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show6 -endi -if $data2_2 != slave then - goto show6 -endi - -print ============== step7 -system sh/exec.sh -n dnode3 -s start -sql create dnode $hostname3 - -$x = 0 -show7: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -print dnode3 ==> $data2_3 -if $data2_1 != master then - goto show7 -endi -if $data2_2 != slave then - goto show7 -endi -if $data3_3 != null then - goto show7 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt23.sim b/tests/script/unique/mnode/mgmt23.sim deleted file mode 100644 index 19c7b4ba762d4bf5a73c10c1afa39e927c7a1c91..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt23.sim +++ /dev/null @@ -1,141 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 - -$x = 0 -show2: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show2 -endi -if $data2_2 != slave then - goto show2 -endi - -print ============== step3 -system sh/exec.sh -n dnode3 -s start -sql create dnode $hostname3 -sleep 8000 - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - return -1 -endi -if $dnode2Role != slave then - return -1 -endi -if $dnode3Role != null then - return -1 -endi - -print ============== step4 -sql drop dnode $hostname2 - -$x = 0 -step4: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto step4 -endi -if $dnode2Role != null then - goto step4 -endi -if $dnode3Role != slave then - goto step4 -endi - -system sh/exec.sh -n dnode2 -s stop - -print ============== step5 -sleep 2000 -sql create dnode $hostname2 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/exec.sh -n dnode2 -s start - -$x = 0 -step5: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto 
step5 -endi -if $dnode2Role != null then - goto step5 -endi -if $dnode3Role != slave then - goto step5 -endi - -print ============== step6 -system sh/exec.sh -n dnode1 -s stop -sql_error show mnodes - -print ============== step7 -sql_error drop dnode $hostname1 - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt24.sim b/tests/script/unique/mnode/mgmt24.sim deleted file mode 100644 index a7bcc59ac0bfa6163d1e2fddfd3a817b102bfa3c..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt24.sim +++ /dev/null @@ -1,84 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 - -$x = 0 -show2: - $x = $x + 1 - sleep 2000 - if $x == 10 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show2 -endi -if $data2_2 != slave then - goto show2 -endi - -print ============== step3 -system sh/exec.sh -n dnode1 -s stop -sleep 2000 -sql_error show mnodes - -print ============== step4 -sql_error drop dnode $hostname1 - -print ============== step5 -system sh/exec.sh -n dnode1 -s start -sql_error create dnode $hostname1 - -sql close -sql connect - -$x = 0 -step5: - $x = $x + 1 - sleep 2000 - if $x == 10 then - return -1 - endi - -sql show mnodes -x step5 - -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto step5 -endi -if $data2_2 != slave then - goto step5 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt25.sim b/tests/script/unique/mnode/mgmt25.sim deleted file mode 100644 index 9cca9c844806b138faf52186ffc3184d4876a1d6..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt25.sim +++ /dev/null @@ -1,95 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 - -$x = 0 -show2: - $x = $x + 1 - sleep 2000 - if $x == 10 then - return -1 - endi - -sql show mnodes -print 
dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show2 -endi -if $data2_2 != slave then - goto show2 -endi - -print ============== step3 -system sh/exec.sh -n dnode3 -s start -sql create dnode $hostname3 -sleep 6000 - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - return -1 -endi -if $dnode2Role != slave then - return -1 -endi -if $dnode3Role != null then - return -1 -endi - -print ============== step4 -sql drop dnode $hostname2 -sleep 6000 - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - return -1 -endi -if $dnode2Role != null then - return -1 -endi -if $dnode3Role != slave then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt26.sim b/tests/script/unique/mnode/mgmt26.sim deleted file mode 100644 index 2816845052e835cf11e0ec7d4ddc71cbdee0ada1..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt26.sim +++ /dev/null @@ -1,123 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 - -$x = 0 -show2: - $x = $x + 1 - sleep 2000 - if $x == 10 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show2 -endi -if $data2_2 != slave then - goto show2 -endi - -print ============== step3 -system sh/exec.sh -n dnode3 -s start -sql create dnode $hostname3 -sleep 6000 - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - return -1 -endi -if $dnode2Role != slave then - return -1 -endi -if $dnode3Role != null then - return -1 -endi - - -print ============== step4 -sql drop dnode $hostname2 -sleep 6000 - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - return -1 -endi -if $dnode2Role != null then - return -1 -endi -if $dnode3Role != slave then - return -1 -endi - -print ============== step5 -system sh/exec.sh -n dnode2 -s stop -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -sleep 3000 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 -sleep 
6000 - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - return -1 -endi -if $dnode2Role != null then - return -1 -endi -if $dnode3Role != slave then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt30.sim b/tests/script/unique/mnode/mgmt30.sim deleted file mode 100644 index d0858c0d6cdffa1cb1cd7f2ba570ae0521f412d5..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt30.sim +++ /dev/null @@ -1,68 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 - -system sh/cfg.sh -n dnode1 -c balanceInterval -v 3000 -system sh/cfg.sh -n dnode2 -c balanceInterval -v 3000 -system sh/cfg.sh -n dnode3 -c balanceInterval -v 3000 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -print dnode3 ==> $data3_3 -if $data2_1 != master then - return -1 -endi -if $data3_2 != null then - return -1 -endi -if $data3_3 != null then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -sleep 3000 - -sql create dnode $hostname2 -sql create dnode $hostname3 - -$x = 0 -step2: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto step2 -endi -if $dnode2Role != slave then - goto step2 -endi -if $dnode3Role != slave then - goto step2 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt33.sim b/tests/script/unique/mnode/mgmt33.sim deleted file mode 100644 index ce7cdce35d8c0463564f46d26a0711d39340c8bf..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt33.sim +++ /dev/null @@ -1,214 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -print dnode3 ==> $data3_3 -if $data2_1 != master then - return -1 -endi -if $data3_2 != null then - return -1 -endi -if $data3_3 != null then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 - -$x = 0 -step2: - $x = $x + 1 - sleep 1000 
- if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto step2 -endi -if $dnode2Role != slave then - goto step2 -endi -if $dnode3Role != null then - goto step2 -endi - -print ============== step3 -system sh/exec.sh -n dnode3 -s start -sql create dnode $hostname3 - -$x = 0 -step3: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto step3 -endi -if $dnode2Role != slave then - goto step3 -endi -if $dnode3Role != slave then - goto step3 -endi - -print ============== step4 -sql drop dnode $hostname2 - -$x = 0 -step4: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto step4 -endi -if $dnode2Role != null then - goto step4 -endi -if $dnode3Role != slave then - goto step4 -endi - -system sh/exec.sh -n dnode2 -s stop -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 -system sh/exec.sh -n dnode2 -s start - -print ============== step5 -sql create dnode $hostname2 - -$x = 0 -step5: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_4 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto step5 -endi -if $dnode2Role != slave then - goto step5 -endi -if $dnode3Role != slave then - goto step5 -endi - -print ============== step6 -system sh/exec.sh -n dnode1 -s stop -$x = 0 -step6: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -x step6 -$dnode1Role = $data2_1 -$dnode2Role = $data2_4 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != offline then - goto step6 -endi -#if $dnode2Role != master then -# return -1 -#endi -#if $dnode3Role != slave then -# return -1 -#endi - -print ============== step7 -sql drop dnode $hostname1 -$x = 0 -step7: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -x step7 -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != null then - goto step7 -endi -#if $dnode2Role != master then -# return -1 -#endi -#if $dnode3Role != slave then -# return -1 -#endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt34.sim b/tests/script/unique/mnode/mgmt34.sim deleted file mode 100644 index 
d8a46b0955f59273279bbbc5c89c07c05db672d7..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt34.sim +++ /dev/null @@ -1,269 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/deploy.sh -n dnode4 -i 4 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -print dnode3 ==> $data3_3 -if $data2_1 != master then - return -1 -endi -if $data3_2 != null then - return -1 -endi -if $data3_3 != null then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 -$x = 0 -step2: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -$dnode4Role = $data2_4 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role -print dnode4 ==> $dnode4Role - -if $dnode1Role != master then - goto step2 -endi -if $dnode2Role != slave then - goto step2 -endi -if $dnode3Role != null then - goto step2 -endi -if $dnode4Role != null then - goto step2 -endi - -print ============== step3 -system sh/exec.sh -n dnode3 -s start -sql create dnode $hostname3 - -$x = 0 -step3: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -$dnode4Role = $data2_4 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role -print dnode4 ==> $dnode4Role - -if $dnode1Role != master then - goto step3 -endi -if $dnode2Role != slave then - goto step3 -endi -if $dnode3Role != slave then - goto step3 -endi -if $dnode4Role != null then - goto step3 -endi - - -print ============== step4 -system sh/exec.sh -n dnode4 -s start -sql create dnode $hostname4 -$x = 0 -step4: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -$dnode4Role = $data2_4 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role -print dnode4 ==> $dnode4Role - -if $dnode1Role != master then - goto step4 -endi -if $dnode2Role != slave then - goto step4 -endi -if $dnode3Role != slave then - goto step4 -endi -if $dnode4Role != null then - goto step4 -endi - -print ============== step5 -sql drop dnode $hostname2 -$x = 0 -step5: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -$dnode4Role = $data2_4 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role -print dnode4 ==> $dnode4Role - -if $dnode1Role != master then - goto step5 -endi -if $dnode2Role != null then - goto step5 -endi -if $dnode3Role != slave then - goto step5 -endi -if $dnode4Role != slave then - goto step5 -endi - -system sh/exec.sh -n dnode2 -s stop -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 -system sh/exec.sh -n dnode2 -s start - -print ============== step6 -sql create dnode $hostname2 -$x = 0 -step6: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql 
show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -$dnode4Role = $data2_4 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role -print dnode4 ==> $dnode4Role - -if $dnode1Role != master then - goto step6 -endi -if $dnode2Role != null then - goto step6 -endi -if $dnode3Role != slave then - goto step6 -endi -if $dnode4Role != slave then - goto step6 -endi - -print ============== step7 -system sh/exec.sh -n dnode1 -s stop -$x = 0 -step7: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -x step7 -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -$dnode4Role = $data2_4 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role -print dnode4 ==> $dnode4Role - -if $dnode1Role != offline then - goto step7 -endi - -print ============== step8 -sql drop dnode $hostname1 -step8: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -x step8 -$dnode1Role = $data2_1 -$dnode2Role = $data2_5 -$dnode3Role = $data2_3 -$dnode4Role = $data2_4 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role -print dnode4 ==> $dnode4Role - -if $dnode1Role != null then - goto step8 -endi -if $dnode2Role != slave then - goto step8 -endi -#if $dnode3Role != master then -# return -1 -#endi -#if $dnode4Role != slave then -# return -1 -#endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmtr2.sim b/tests/script/unique/mnode/mgmtr2.sim deleted file mode 100644 index 5afb41905846bff3ce9894e928245a7d34078354..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmtr2.sim +++ /dev/null @@ -1,87 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sleep 2000 -sql connect - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - return -1 -endi -if $dnode2Role != null then - return -1 -endi -if $dnode3Role != null then - return -1 -endi - -print ============== step2 -sql create dnode $hostname2 -sql create dnode $hostname3 - -print ============== step3 -print ========= start dnode2 and dnode3 - -system sh/exec.sh -n dnode2 -s start -sleep 1000 -system sh/exec.sh -n dnode3 -s start - -sleep 8000 -system sh/exec.sh -n dnode2 -s stop -system sh/exec.sh -n dnode3 -s stop -sleep 4000 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -sleep 4000 -system sh/exec.sh -n dnode2 -s stop -system sh/exec.sh -n dnode3 -s stop -sleep 4000 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start - -print ============== step4 -$x = 0 -step4: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show 
mnodes - -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto step4 -endi -if $dnode2Role != slave then - goto step4 -endi -if $dnode3Role != null then - goto step4 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/testSuite.sim b/tests/script/unique/mnode/testSuite.sim deleted file mode 100644 index b9adbe06a282548d56d7e7feb8a36f99198d8c0d..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/testSuite.sim +++ /dev/null @@ -1,9 +0,0 @@ -run unique/mnode/mgmt21.sim -run unique/mnode/mgmt22.sim -run unique/mnode/mgmt23.sim -run unique/mnode/mgmt24.sim -run unique/mnode/mgmt25.sim -run unique/mnode/mgmt26.sim -run unique/mnode/mgmt33.sim -run unique/mnode/mgmt34.sim -run unique/mnode/mgmtr2.sim diff --git a/tests/script/unique/stream/metrics_balance.sim b/tests/script/unique/stream/metrics_balance.sim deleted file mode 100644 index ff48c2236709635c8d1a790104b0185144a96866..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/metrics_balance.sim +++ /dev/null @@ -1,312 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c statusInterval -v 1 -system sh/cfg.sh -n dnode2 -c statusInterval -v 1 -system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0 -system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0 -system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4 -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 - -$dbPrefix = db -$tbPrefix = tb -$mtPrefix = mt -$stPrefix = st -$tbNum = 3 -$rowNum = 200 - -print ========= start dnode1 -system sh/exec.sh -n dnode1 -s start -sql connect - -print ============== step1 -$db = $dbPrefix -sql create database $db -sql use $db - -$i = 0 -$st = $stPrefix . $i -$mt = $mtPrefix . $i -$tbNum = 3 -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - if $i == 0 then - sleep 2000 - endi - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -$st = $stPrefix . $i -$mt = $mtPrefix . $i -$tbNum = 6 -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - if $i == 0 then - sleep 2000 - endi - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -$st = $stPrefix . $i -$mt = $mtPrefix . $i -$tbNum = 9 -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) -while $i < $tbNum - $tb = $tbPrefix . 
$i - sql create table $tb using $mt tags( $i ) - if $i == 0 then - sleep 2000 - endi - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -$st = $stPrefix . $i -$mt = $mtPrefix . $i -$tbNum = 12 -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - if $i == 0 then - sleep 2000 - endi - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - - -print =============== step2 - -sql show tables -if $rows != 16 then - return -1 -endi - -print =============== step3 -print sleep 22 seconds -sleep 22000 - -$i = 0 -$mt = $mtPrefix . $i -$st = $stPrefix . $i -sql select * from $st -$r0 = $rows -print $st ==> $r0 $data00 $data01 $data10 $data11 - -$i = 3 -$mt = $mtPrefix . $i -$st = $stPrefix . $i -sql select * from $st -$r3 = $rows -print $st ==> $r3 $data00 $data01 $data10 $data11 - -$i = 6 -$mt = $mtPrefix . $i -$st = $stPrefix . $i -sql select * from $st -$r6 = $rows -print $st ==> $r6 $data00 $data01 $data10 $data11 - -$i = 9 -$mt = $mtPrefix . $i -$st = $stPrefix . $i -sql select * from $st -$r9 = $rows -print $st ==> $r9 $data00 $data01 $data10 $data11 - -print rows0=>$r0 rows3=>$r3 rows6=>$r6 rows9=>$r9 - -$x = 0 -show1: - $x = $x + 1 - sleep 2000 - if $x == 20 then - return -1 - endi -sql show dnodes -x show1 -$dnode1Vnodes = $data3_192.168.0.1 -print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 -print dnode2 $dnode2Vnodes - -if $dnode1Vnodes != 0 then - goto show1 -endi -if $dnode2Vnodes != NULL then - goto show1 -endi - -print =============== step4 start dnode2 -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -sleep 8000 - -$x = 0 -show2: - $x = $x + 1 - sleep 2000 - if $x == 20 then - return -1 - endi -sql show dnodes -x show2 -$dnode1Vnodes = $data3_192.168.0.1 -print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 -print dnode2 $dnode2Vnodes - -if $dnode1Vnodes != 2 then - goto show2 -endi -if $dnode2Vnodes != 2 then - goto show2 -endi - -print rows0=>$r0 rows3=>$r3 rows6=>$r6 rows9=>$r9 -print =============== step5 -print sleep 22 seconds -sleep 22000 - -print =============== step6 -$i = 0 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -if $rows != $rowNum then - return -1 -endi - -$i = 3 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -if $rows != $rowNum then - return -1 -endi - -$i = 6 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -if $rows != $rowNum then - return -1 -endi - -$i = 9 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -if $rows != $rowNum then - return -1 -endi - -print rows0=>$r0 rows3=>$r3 rows6=>$r6 rows9=>$r9 -print =============== step7 -$i = 0 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st ==> $r0 $rows , $data00 $data01 $data10 $data11 -if $rows == 0 then - return -1 -endi -if $rows <= $r0 then - return -1 -endi - -$i = 3 -$tb = $tbPrefix . $i -$st = $stPrefix . 
$i -sql select * from $st -print $st ==> $r3 $rows , $data00 $data01 $data10 $data11 -if $rows == 0 then - return -1 -endi -if $rows <= $r3 then - return -1 -endi - - -$i = 6 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st ==> $r6 $rows , $data00 $data01 $data10 $data11 -if $rows == 0 then - return -1 -endi -if $rows <= $r6 then - return -1 -endi - -$i = 9 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st ==> $r9 $rows , $data00 $data01 $data10 $data11 -if $rows == 0 then - return -1 -endi -if $rows <= $r9 then - return -1 -endi - -print =============== clear -system sh/exec.sh -n dnode1 -s stop -system sh/exec.sh -n dnode2 -s stop - diff --git a/tests/script/unique/stream/metrics_replica1_dnode2.sim b/tests/script/unique/stream/metrics_replica1_dnode2.sim deleted file mode 100644 index 20c37cefc39f8fa6393d49934adb046f409fca25..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/metrics_replica1_dnode2.sim +++ /dev/null @@ -1,260 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -print ======================== dnode1 start - -$dbPrefix = m1d_db -$tbPrefix = m1d_tb -$mtPrefix = m1d_mt -$stPrefix = m1d_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop database $db -x step1 -step1: -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $mt interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $mt interval(1d) -print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $mt interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $mt interval(1d) -print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . 
av -sql create table $st as select avg(tbcol) from $mt interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $mt interval(1d) -print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $mt interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $mt interval(1d) -print select min(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . mi -sql create table $st as select min(tbcol) from $mt interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $mt interval(1d) -print select max(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $mt interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $mt interval(1d) -print select first(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $mt interval(1d) - -print =============== step10 la -sql select last(tbcol) from $mt interval(1d) -print select last(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $mt interval(1d) - -print =============== step11 wh -sql select count(tbcol) from $mt where ts < now + 4m interval(1d) -print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d) - -print =============== step12 as -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $mt interval(1d) - -print =============== step13 -print sleep 32 seconds -sleep 32000 - -print =============== step14 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . 
wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != 200 then -# return -1 -#endi - -$st = $stPrefix . as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi diff --git a/tests/script/unique/stream/metrics_replica2_dnode2.sim b/tests/script/unique/stream/metrics_replica2_dnode2.sim deleted file mode 100644 index aa8c1871017982cecc695abc8f64d732a8a7fc4e..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/metrics_replica2_dnode2.sim +++ /dev/null @@ -1,260 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi - - -print ======================== dnode1 start - -$dbPrefix = m2d_db -$tbPrefix = m2d_tb -$mtPrefix = m2d_mt -$stPrefix = m2d_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop database $db -x step1 -step1: -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $mt interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $mt interval(1d) -print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $mt interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $mt interval(1d) -print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $mt interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $mt interval(1d) -print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $mt interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $mt interval(1d) -print select min(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . 
mi -sql create table $st as select min(tbcol) from $mt interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $mt interval(1d) -print select max(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $mt interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $mt interval(1d) -print select first(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $mt interval(1d) - -print =============== step10 la -sql select last(tbcol) from $mt interval(1d) -print select last(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $mt interval(1d) - -print =============== step11 wh -sql select count(tbcol) from $mt where ts < now + 4m interval(1d) -print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d) - -print =============== step12 as -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $mt interval(1d) - -print =============== step13 -print sleep 22 seconds -sleep 22000 - -print =============== step14 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != 200 then -# return -1 -#endi - -$st = $stPrefix . 
as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi diff --git a/tests/script/unique/stream/metrics_replica2_dnode2_vnoden.sim b/tests/script/unique/stream/metrics_replica2_dnode2_vnoden.sim deleted file mode 100644 index be2fcefe66ed6ca2e24a44cd22fa072201137b89..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/metrics_replica2_dnode2_vnoden.sim +++ /dev/null @@ -1,261 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi - -print ======================== dnode1 start - -$dbPrefix = m2dv_db -$tbPrefix = m2dv_tb -$mtPrefix = m2dv_mt -$stPrefix = m2dv_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop database $db -x step1 -step1: -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $mt interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $mt interval(1d) -print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $mt interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $mt interval(1d) -print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $mt interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $mt interval(1d) -print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $mt interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $mt interval(1d) -print select min(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . 
mi -sql create table $st as select min(tbcol) from $mt interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $mt interval(1d) -print select max(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $mt interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $mt interval(1d) -print select first(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $mt interval(1d) - -print =============== step10 la -sql select last(tbcol) from $mt interval(1d) -print select last(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $mt interval(1d) - -print =============== step11 wh -sql select count(tbcol) from $mt where ts < now + 4m interval(1d) -print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d) - -print =============== step12 as -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $mt interval(1d) - -print =============== step13 -print sleep 22 seconds -sleep 22000 - -print =============== step14 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != 200 then -# return -1 -#endi - -$st = $stPrefix . 
as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi diff --git a/tests/script/unique/stream/metrics_replica2_dnode3.sim b/tests/script/unique/stream/metrics_replica2_dnode3.sim deleted file mode 100644 index f7b17610c380d9f90a2cefd4af86ea766facdffa..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/metrics_replica2_dnode3.sim +++ /dev/null @@ -1,270 +0,0 @@ -system sh/stop_dnodes.sh - - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode3 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -sql create dnode $hostname3 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -if $data4_192.168.0.3 == offline then - goto createDnode -endi - -print ======================== dnode1 start - -$dbPrefix = m2d3_db -$tbPrefix = m2d3_tb -$mtPrefix = m2d3_mt -$stPrefix = m2d3_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop database $db -x step1 -step1: -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $mt interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $mt interval(1d) -print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $mt interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $mt interval(1d) -print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $mt interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $mt interval(1d) -print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . 
su -sql create table $st as select sum(tbcol) from $mt interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $mt interval(1d) -print select min(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . mi -sql create table $st as select min(tbcol) from $mt interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $mt interval(1d) -print select max(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $mt interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $mt interval(1d) -print select first(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $mt interval(1d) - -print =============== step10 la -sql select last(tbcol) from $mt interval(1d) -print select last(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $mt interval(1d) - -print =============== step11 wh -sql select count(tbcol) from $mt where ts < now + 4m interval(1d) -print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d) - -print =============== step12 as -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $mt interval(1d) - -print =============== step13 -print sleep 22 seconds -sleep 22000 - -print =============== step14 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != 200 then -# return -1 -#endi - -$st = $stPrefix . 
as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi diff --git a/tests/script/unique/stream/metrics_replica3_dnode4.sim b/tests/script/unique/stream/metrics_replica3_dnode4.sim deleted file mode 100644 index 402712800313ff5b96f970d12ffe007f77bc26f7..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/metrics_replica3_dnode4.sim +++ /dev/null @@ -1,280 +0,0 @@ -system sh/stop_dnodes.sh - - - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/deploy.sh -n dnode4 -i 4 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode3 -c walLevel -v 1 -system sh/cfg.sh -n dnode4 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -sql create dnode $hostname3 -sql create dnode $hostname4 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -system sh/exec.sh -n dnode4 -s start - -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -if $data4_192.168.0.3 == offline then - goto createDnode -endi -if $data4_192.168.0.4 == offline then - goto createDnode -endi - -print ======================== dnode1 start - -$dbPrefix = m2d3_db -$tbPrefix = m2d3_tb -$mtPrefix = m2d3_mt -$stPrefix = m2d3_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop database $db -x step1 -step1: -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $mt interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $mt interval(1d) -print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $mt interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $mt interval(1d) -print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . 
av -sql create table $st as select avg(tbcol) from $mt interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $mt interval(1d) -print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $mt interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $mt interval(1d) -print select min(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . mi -sql create table $st as select min(tbcol) from $mt interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $mt interval(1d) -print select max(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $mt interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $mt interval(1d) -print select first(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $mt interval(1d) - -print =============== step10 la -sql select last(tbcol) from $mt interval(1d) -print select last(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $mt interval(1d) - -print =============== step11 wh -sql select count(tbcol) from $mt where ts < now + 4m interval(1d) -print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d) - -print =============== step12 as -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $mt interval(1d) - -print =============== step13 -print sleep 22 seconds -sleep 22000 - -print =============== step14 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . 
wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != 200 then -# return -1 -#endi - -$st = $stPrefix . as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi diff --git a/tests/script/unique/stream/metrics_vnode_stop.sim b/tests/script/unique/stream/metrics_vnode_stop.sim deleted file mode 100644 index cd84cb3cdf5f8096f4986a222cc371db3900f765..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/metrics_vnode_stop.sim +++ /dev/null @@ -1,188 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -print ======================== dnode start - -$dbPrefix = db -$tbPrefix = tb -$mtPrefix = mt -$stPrefix = st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step3 -system sh/exec.sh -n dnode2 -s stop - -print =============== step4 -print sleep 22 seconds -sleep 22000 - -print =============== step5 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -print ============= step6 - -sql close -system sh/exec.sh -n dnode1 -s stop -system sh/exec.sh -n dnode2 -s stop -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/exec.sh -n dnode2 -s start -sleep 2000 - -$x = 0 -connectTbase2: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql connect -x connectTbase2 -sleep 2000 - -sql create dnode $hostname1 -system sh/exec.sh -n dnode1 -s start -sleep 2000 -print ======================== dnode start - -$dbPrefix = db -$tbPrefix = tb -$mtPrefix = mt -$stPrefix = st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step7 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . 
m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step8 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step9 -system sh/exec.sh -n dnode1 -s stop - -print =============== step10 -print sleep 22 seconds -sleep 22000 - -print =============== step11 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - - - diff --git a/tests/script/unique/stream/table_balance.sim b/tests/script/unique/stream/table_balance.sim deleted file mode 100644 index 45e054e2efdfbd7f3d01e3a860c5ac227f3327fc..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_balance.sim +++ /dev/null @@ -1,238 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c statusInterval -v 1 -system sh/cfg.sh -n dnode2 -c statusInterval -v 1 -system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0 -system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 - -$dbPrefix = tb_db -$tbPrefix = tb_tb -$mtPrefix = tb_mt -$stPrefix = tb_st -$tbNum = 10 -$rowNum = 200 -$totalNum = 200 - -print ========= start dnode1 -system sh/exec.sh -n dnode1 -s start - -sql connect - -print ============== step1 -$i = 0 -$db = $dbPrefix -$mt = $mtPrefix -$st = $stPrefix . $i - -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - if $i == 0 then - sleep 2000 - endi - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 - -$i = 1 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -$i = 5 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -$i = 8 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -sql show tables -if $rows != 13 then - return -1 -endi - -print =============== step3 -print sleep 22 seconds -sleep 22000 - -$i = 1 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -$r1 = $rows -print $st ==> $r1 $data00 $data01 $data10 $data11 - -$i = 5 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -$r5 = $rows -print $st ==> $r5 $data00 $data01 $data10 $data11 - -$i = 8 -$tb = $tbPrefix . $i -$st = $stPrefix . 
$i -sql select * from $st -$r8 = $rows -print $st ==> $r8 $data00 $data01 $data10 $data11 - -print rows1=>$r1 rows5=>$r5 rows8=>$r8 - -$x = 0 -show1: - $x = $x + 1 - sleep 2000 - if $x == 20 then - return -1 - endi -sql show dnodes -x show1 -$dnode1Vnodes = $data3_192.168.0.1 -print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 -print dnode2 $dnode2Vnodes - -if $dnode1Vnodes != 0 then - goto show1 -endi -if $dnode2Vnodes != NULL then - goto show1 -endi - -print =============== step4 start dnode2 -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -sleep 8000 - -$x = 0 -show2: - $x = $x + 1 - sleep 2000 - if $x == 20 then - return -1 - endi -sql show dnodes -x show2 -$dnode1Vnodes = $data3_192.168.0.1 -print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 -print dnode2 $dnode2Vnodes - -if $dnode1Vnodes != 2 then - goto show2 -endi -if $dnode2Vnodes != 2 then - goto show2 -endi - -print rows1=>$r1 rows5=>$r5 rows8=>$r8 -print =============== step5 -print sleep 22 seconds -sleep 22000 - -print =============== step6 -$i = 1 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -if $rows != $rowNum then - return -1 -endi - -$i = 5 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -if $rows != $rowNum then - return -1 -endi - -$i = 8 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -if $rows != $rowNum then - return -1 -endi - -print rows1=>$r1 rows5=>$r5 rows8=>$r8 -print =============== step7 -$i = 1 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st ==> $r1 $rows , $data00 $data01 $data10 $data11 -if $rows == 0 then - return -1 -endi -if $rows <= $r1 then - return -1 -endi - -$i = 5 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st ==> $r5 $rows , $data00 $data01 $data10 $data11 -if $rows == 0 then - return -1 -endi -if $rows <= $r5 then - return -1 -endi - - -$i = 8 -$tb = $tbPrefix . $i -$st = $stPrefix . 
$i -sql select * from $st -print $st ==> $r8 $rows , $data00 $data01 $data10 $data11 -if $rows == 0 then - return -1 -endi -if $rows <= $r8 then - return -1 -endi - - -if $r1 != $r5 then - return -1 -endi - -if $r8 != $r5 then - return -1 -endi - -print =============== clear -system sh/exec.sh -n dnode1 -s stop -system sh/exec.sh -n dnode2 -s stop - diff --git a/tests/script/unique/stream/table_move.sim b/tests/script/unique/stream/table_move.sim deleted file mode 100644 index 964a0c025363fd650e8051312a812fffbddaea7d..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_move.sim +++ /dev/null @@ -1,269 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/deploy.sh -n dnode4 -i 4 - -system sh/cfg.sh -n dnode1 -c statusInterval -v 1 -system sh/cfg.sh -n dnode2 -c statusInterval -v 1 -system sh/cfg.sh -n dnode3 -c statusInterval -v 1 -system sh/cfg.sh -n dnode4 -c statusInterval -v 1 - -system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode3 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode4 -c balanceInterval -v 10 - -system sh/cfg.sh -n dnode1 -c wallevel -v 1 -system sh/cfg.sh -n dnode2 -c wallevel -v 1 -system sh/cfg.sh -n dnode3 -c wallevel -v 1 -system sh/cfg.sh -n dnode4 -c wallevel -v 1 - -system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0 -system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0 -system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 0 -system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 0 - -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 - -system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4 -system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4 -system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3 - -system sh/cfg.sh -n dnode1 -c maxVnodeConnections -v 1000 -system sh/cfg.sh -n dnode1 -c maxMeterConnections -v 1000 -system sh/cfg.sh -n dnode1 -c maxShellConns -v 1000 -system sh/cfg.sh -n dnode1 -c maxMgmtConnections -v 1000 - -system sh/cfg.sh -n dnode2 -c maxVnodeConnections -v 1000 -system sh/cfg.sh -n dnode2 -c maxMeterConnections -v 1000 -system sh/cfg.sh -n dnode2 -c maxShellConns -v 1000 -system sh/cfg.sh -n dnode2 -c maxMgmtConnections -v 1000 - -$dbPrefix = db -$tbPrefix = tb -$mtPrefix = mt -$stPrefix = st -$tbNum = 5 -$rowNum = 20 -$totalNum = 200 - -print ============== step1 -print ========= start dnode1 -system sh/exec.sh -n dnode1 -s start - -sql connect -sleep 2000 - -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -20 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now $ms , $x , $x ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 -$i = 0 -$tb = $tbPrefix . $i -$st = $stPrefix . 
$i - -sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) ===> $data00 $data01 $data02, $data03 -if $data01 != $rowNum then - return -1 -endi -if $data02 != $rowNum then - return -1 -endi -if $data03 != $rowNum then - return -1 -endi - -sql show tables -if $rows != 5 then - return -1 -endi - -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -sql show tables -if $rows != 6 then - return -1 -endi - -print =============== step3 -print sleep 22 seconds -sleep 22000 - -sql select * from $tb -if $rows != 20 then - return -1 -endi - -sql select * from $mt -if $rows != 100 then - return -1 -endi - -sql select * from $st -print select * from $st => $data01 -if $rows == 0 then - return -1 -endi - -$x = 0 -show1: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes -x show1 -$dnode1Vnodes = $data3_192.168.0.1 -print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 -print dnode2 $dnode2Vnodes - -if $dnode1Vnodes != 6 then - goto show1 -endi -if $dnode2Vnodes != NULL then - goto show1 -endi - -print =============== step4 start dnode2 -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -sleep 8000 - -$x = 0 -show2: - $x = $x + 1 - sleep 2000 - if $x == 20 then - return -1 - endi -sql show dnodes -x show2 -$dnode1Vnodes = $data3_192.168.0.1 -print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 -print dnode2 $dnode2Vnodes - -if $dnode1Vnodes != 7 then - goto show2 -endi -if $dnode2Vnodes != 7 then - goto show2 -endi - -print =============== step5 drop dnode1 -system sh/exec.sh -n dnode1 -s stop -print stop dnode1 and sleep 10000 -sleep 10000 - -sql drop dnode $hostname1 -print drop dnode1 and sleep 9000 -sleep 9000 - -$x = 0 -show6: - $x = $x + 1 - sleep 2000 - if $x == 20 then - return -1 - endi -sql show dnodes -x show6 -$dnode1Vnodes = $data3_192.168.0.1 -print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 -print dnode2 $dnode2Vnodes - -if $dnode1Vnodes != NULL then - goto show6 -endi -if $dnode2Vnodes != 6 then - goto show6 -endi - -print =============== step6 - -print select * from $tb -sql select * from $tb -if $rows != 20 then - return -1 -endi - -print select * from $mt -sql select * from $mt -if $rows != 80 then - return -1 -endi - - -print =============== step7 -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -$i = 0 -$tb = $tbPrefix . $i -$st = $stPrefix . 
$i - -print =============== step8 -print sleep 22 seconds -sleep 22000 - -print select * from $st -sql select * from $st -if $rows == 0 then - return -1 -endi - - -print =============== clear -system sh/exec.sh -n dnode1 -s stop -system sh/exec.sh -n dnode2 -s stop - diff --git a/tests/script/unique/stream/table_replica1_dnode2.sim b/tests/script/unique/stream/table_replica1_dnode2.sim deleted file mode 100644 index ccc6026e9c92975ccdd4fd12366a11f50a818d3f..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_replica1_dnode2.sim +++ /dev/null @@ -1,137 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -print ======================== dnode1 start - -$dbPrefix = t1d_db -$tbPrefix = t1d_tb -$mtPrefix = t1d_mt -$stPrefix = t1d_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop database $db -x step1 -step1: -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 - -$i = 1 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) --> $data00 $data01 $data02 $data03 -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) - -$i = 5 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) --> $data00 $data01 $data02 $data03 -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) - -$i = 8 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) --> $data00 $data01 $data02 $data03 -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) - -sql show tables -if $rows != 13 then - return -1 -endi - -print =============== step3 -print sleep 22 seconds -sleep 22000 - - -print =============== step4 -$i = 1 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st ==> $rows $data00 $data01 $data10 $data11 -$rows1 = $rows -if $data01 != 20 then - return -1 -endi - -$i = 5 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st => $rows $data00 $data01 $data10 $data11 -$rows5 = $rows -if $data01 != 20 then - return -1 -endi - -$i = 8 -$tb = $tbPrefix . $i -$st = $stPrefix . 
$i -sql select * from $st -print $st ==> $rows $data00 $data01 $data10 $data11 -$rows8 = $rows -if $data01 != 20 then - return -1 -endi - -if $rows8 != $rows5 then - return -1 -endi - -if $rows8 != $rows1 then - return -1 -endi \ No newline at end of file diff --git a/tests/script/unique/stream/table_replica2_dnode2.sim b/tests/script/unique/stream/table_replica2_dnode2.sim deleted file mode 100644 index 947fa0d2f9093c802a9c99c74edddeffca102d38..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_replica2_dnode2.sim +++ /dev/null @@ -1,312 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -print ======================== dnode1 start - -$dbPrefix = t2d_db -$tbPrefix = t2d_tb -$mtPrefix = t2d_mt -$stPrefix = t2d_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop database $db -x step1 -step1: -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $tb interval(1d) -print select count(*) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $tb interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $tb interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $tb interval(1d) -print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $tb interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $tb interval(1d) -print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $tb interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $tb interval(1d) -print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $tb interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $tb interval(1d) -print select min(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . 
mi -sql create table $st as select min(tbcol) from $tb interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $tb interval(1d) -print select max(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $tb interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $tb interval(1d) -print select first(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $tb interval(1d) - -print =============== step10 la -sql select last(tbcol) from $tb interval(1d) -print select last(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $tb interval(1d) - -print =============== step11 st -sql select stddev(tbcol) from $tb interval(1d) -print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . std -sql create table $st as select stddev(tbcol) from $tb interval(1d) - -print =============== step12 le -sql select leastsquares(tbcol, 1, 1) from $tb interval(1d) -print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01 -#if $data01 != @(0.000017, -25362055.126740)@ then -# return -1 -#endi - -$st = $stPrefix . le -sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d) - -print =============== step13 pe - -sql select percentile(tbcol, 1) from $tb interval(1d) -print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . pe -sql create table $st as select percentile(tbcol, 1) from $tb interval(1d) - -print =============== step14 wh -sql select count(tbcol) from $tb where ts < now + 4m interval(1d) -print select count(tbcol) from $tb where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $tb where ts < now + 4m interval(1d) - -print =============== step15 as -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $tb interval(1d) - -print =============== step16 -print sleep 22 seconds -sleep 22000 - -print =============== step17 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . 
ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . std -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . le -sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != @(0.000017, -25270086.331047)@ then -# return -1 -#endi - -$st = $stPrefix . pe -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != $rowNum then -# return -1 -#endi - -$st = $stPrefix . as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi diff --git a/tests/script/unique/stream/table_replica2_dnode2_vnoden.sim b/tests/script/unique/stream/table_replica2_dnode2_vnoden.sim deleted file mode 100644 index 75300362393eaa543740307d4d11f9a4eabbbc50..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_replica2_dnode2_vnoden.sim +++ /dev/null @@ -1,314 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -print ======================== dnode1 start - -$dbPrefix = t2dv_db -$tbPrefix = t2dv_tb -$mtPrefix = t2dv_mt -$stPrefix = t2dv_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $tb interval(1d) -print select count(*) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $tb interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . 
c2 -sql create table $st as select count(tbcol) from $tb interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $tb interval(1d) -print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $tb interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $tb interval(1d) -print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $tb interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $tb interval(1d) -print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $tb interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $tb interval(1d) -print select min(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . mi -sql create table $st as select min(tbcol) from $tb interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $tb interval(1d) -print select max(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $tb interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $tb interval(1d) -print select first(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $tb interval(1d) - -print =============== step10 la -sql select last(tbcol) from $tb interval(1d) -print select last(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $tb interval(1d) - -print =============== step11 st -sql select stddev(tbcol) from $tb interval(1d) -print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . std -sql create table $st as select stddev(tbcol) from $tb interval(1d) - -print =============== step12 le -sql select leastsquares(tbcol, 1, 1) from $tb interval(1d) -print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01 -#if $data01 != @(0.000017, -25362055.126740)@ then -# return -1 -#endi - -$st = $stPrefix . le -sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d) - -print =============== step13 pe - -sql select percentile(tbcol, 1) from $tb interval(1d) -print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . pe -sql create table $st as select percentile(tbcol, 1) from $tb interval(1d) - -print =============== step14 wh -sql select count(tbcol) from $tb where ts < now + 4m interval(1d) -print select count(tbcol) from $tb where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . 
wh -#sql create table $st as select count(tbcol) from $tb where ts < now + 4m interval(1d) - -print =============== step15 as -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $tb interval(1d) - -print =============== step16 -print sleep 22 seconds -sleep 22000 - -print =============== step17 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . std -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . le -sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != @(0.000017, -25270086.331047)@ then -# return -1 -#endi - -$st = $stPrefix . pe -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != $rowNum then -# return -1 -#endi - -$st = $stPrefix . 
as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi diff --git a/tests/script/unique/stream/table_replica2_dnode3.sim b/tests/script/unique/stream/table_replica2_dnode3.sim deleted file mode 100644 index 49eb3563b3964f05f31d72a8fd1ff12f2b5b3a03..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_replica2_dnode3.sim +++ /dev/null @@ -1,325 +0,0 @@ -system sh/stop_dnodes.sh - - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode3 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - - -sql connect - -sql create dnode $hostname2 -sql create dnode $hostname3 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -if $data4_192.168.0.3 == offline then - goto createDnode -endi - -print ======================== dnode1 start - -$dbPrefix = t2d3_db -$tbPrefix = t2d3_tb -$mtPrefix = t2d3_mt -$stPrefix = t2d3_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $tb interval(1d) -print select count(*) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $tb interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $tb interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $tb interval(1d) -print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $tb interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $tb interval(1d) -print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $tb interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $tb interval(1d) -print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . 
su -sql create table $st as select sum(tbcol) from $tb interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $tb interval(1d) -print select min(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . mi -sql create table $st as select min(tbcol) from $tb interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $tb interval(1d) -print select max(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $tb interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $tb interval(1d) -print select first(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $tb interval(1d) - -print =============== step10 la -sql select last(tbcol) from $tb interval(1d) -print select last(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $tb interval(1d) - -print =============== step11 st -sql select stddev(tbcol) from $tb interval(1d) -print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . std -sql create table $st as select stddev(tbcol) from $tb interval(1d) - -print =============== step12 le -sql select leastsquares(tbcol, 1, 1) from $tb interval(1d) -print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01 -#if $data01 != @(0.000017, -25362055.126740)@ then -# return -1 -#endi - -$st = $stPrefix . le -sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d) - -print =============== step13 pe - -sql select percentile(tbcol, 1) from $tb interval(1d) -print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . pe -sql create table $st as select percentile(tbcol, 1) from $tb interval(1d) - -print =============== step14 wh -sql select count(tbcol) from $tb where ts < now + 4m interval(1d) -print select count(tbcol) from $tb where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $tb where ts < now + 4m interval(1d) - -print =============== step15 as -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $tb interval(1d) - -print =============== step16 -print sleep 22 seconds -sleep 22000 - -print =============== step17 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . 
su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . std -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . le -sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != @(0.000017, -25270086.331047)@ then -# return -1 -#endi - -$st = $stPrefix . pe -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != $rowNum then -# return -1 -#endi - -$st = $stPrefix . as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi diff --git a/tests/script/unique/stream/table_replica3_dnode4.sim b/tests/script/unique/stream/table_replica3_dnode4.sim deleted file mode 100644 index 2cc443c72fc656b87ca8c1d330381ed5078cd755..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_replica3_dnode4.sim +++ /dev/null @@ -1,333 +0,0 @@ -system sh/stop_dnodes.sh - - - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/deploy.sh -n dnode4 -i 4 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode3 -c walLevel -v 1 -system sh/cfg.sh -n dnode4 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -sql create dnode $hostname3 -sql create dnode $hostname4 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -system sh/exec.sh -n dnode4 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -if $data4_192.168.0.3 == offline then - goto createDnode -endi -if $data4_192.168.0.4 == offline then - goto createDnode -endi - -print ======================== dnode1 start - -$dbPrefix = t3d_db -$tbPrefix = t3d_tb -$mtPrefix = t3d_mt -$stPrefix = t3d_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db replica 3 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . 
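Before any of the multi-dnode scripts run queries, they loop on show dnodes until the added dnodes stop reporting offline, giving up after 20 one-second attempts. The same guard in Python, as a sketch (index 4 for the status column mirrors the scripts' $data4_<ep> checks and may differ by version):

    import time
    import taos

    cur = taos.connect(host="localhost").cursor()
    for _ in range(20):
        cur.execute("show dnodes")
        if all(row[4] != "offline" for row in cur.fetchall()):
            break                     # every dnode is up; safe to proceed
        time.sleep(1)
    else:
        raise RuntimeError("dnodes still offline after 20 attempts")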
m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $tb interval(1d) -print select count(*) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $tb interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $tb interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $tb interval(1d) -print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $tb interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $tb interval(1d) -print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $tb interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $tb interval(1d) -print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $tb interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $tb interval(1d) -print select min(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . mi -sql create table $st as select min(tbcol) from $tb interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $tb interval(1d) -print select max(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $tb interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $tb interval(1d) -print select first(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $tb interval(1d) - -print =============== step10 la -sql select last(tbcol) from $tb interval(1d) -print select last(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $tb interval(1d) - -print =============== step11 st -sql select stddev(tbcol) from $tb interval(1d) -print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . std -sql create table $st as select stddev(tbcol) from $tb interval(1d) - -print =============== step12 le -sql select leastsquares(tbcol, 1, 1) from $tb interval(1d) -print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01 -#if $data01 != @(0.000017, -25362055.126740)@ then -# return -1 -#endi - -$st = $stPrefix . 
le -sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d) - -print =============== step13 pe - -sql select percentile(tbcol, 1) from $tb interval(1d) -print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . pe -sql create table $st as select percentile(tbcol, 1) from $tb interval(1d) - -print =============== step14 wh -sql select count(tbcol) from $tb where ts < now + 4m interval(1d) -print select count(tbcol) from $tb where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $tb where ts < now + 4m interval(1d) - -print =============== step15 as -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $tb interval(1d) - -print =============== step16 -print sleep 22 seconds -sleep 22000 - -print =============== step17 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . std -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . le -sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != @(0.000017, -25270086.331047)@ then -# return -1 -#endi - -$st = $stPrefix . pe -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != $rowNum then -# return -1 -#endi - -$st = $stPrefix . 
as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi diff --git a/tests/script/unique/stream/table_vnode_stop.sim b/tests/script/unique/stream/table_vnode_stop.sim deleted file mode 100644 index 625de32a8d7a1e5336dd10f313565bdbc0daf0fc..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_vnode_stop.sim +++ /dev/null @@ -1,189 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi - -print ======================== dnode start - -$dbPrefix = db -$tbPrefix = tb -$mtPrefix = mt -$stPrefix = st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $tb interval(1d) -print select count(*) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $tb interval(1d) - -print =============== step3 -system sh/exec.sh -n dnode2 -s stop - -print =============== step4 -print sleep 22 seconds -sleep 22000 - -print =============== step5 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -print ============= step6 - -sql close -system sh/exec.sh -n dnode1 -s stop -system sh/exec.sh -n dnode2 -s stop -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -sleep 2000 -system sh/exec.sh -n dnode2 -s start - -$x = 0 -connectTbase2: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql connect -x connectTbase2 -sleep 2000 - -sql create dnode $hostname1 -system sh/exec.sh -n dnode1 -s start -sleep 2000 -print ======================== dnode start - -$dbPrefix = db -$tbPrefix = tb -$mtPrefix = mt -$stPrefix = st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step7 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . 
m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step8 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $tb interval(1d) -print select count(*) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $tb interval(1d) - -print =============== step9 -system sh/exec.sh -n dnode1 -s stop - -print =============== step10 -print sleep 22 seconds -sleep 22000 - -print =============== step11 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - - - diff --git a/tests/script/unique/stream/testSuite.sim b/tests/script/unique/stream/testSuite.sim deleted file mode 100644 index bbf5da3d376d9eccc02aa61b1122cadb5fc04813..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/testSuite.sim +++ /dev/null @@ -1,15 +0,0 @@ -#run unique/stream/table_replica1_dnode2.sim -#run unique/stream/metrics_replica1_dnode2.sim -#run unique/stream/table_replica2_dnode2.sim -#run unique/stream/metrics_replica2_dnode2.sim -#run unique/stream/table_replica2_dnode2_vnoden.sim -#run unique/stream/metrics_replica2_dnode2_vnoden.sim -#run unique/stream/table_replica2_dnode3.sim -#run unique/stream/metrics_replica2_dnode3.sim -#run unique/stream/table_replica3_dnode4.sim -#run unique/stream/metrics_replica3_dnode4.sim -#run unique/stream/table_vnode_stop.sim -#run unique/stream/metrics_vnode_stop.sim -##run unique/stream/table_balance.sim -##run unique/stream/metrics_balance.sim -##run unique/stream/table_move.sim \ No newline at end of file diff --git a/tests/system-test/0-others/fsync.py b/tests/system-test/0-others/fsync.py new file mode 100644 index 0000000000000000000000000000000000000000..964550cdbc8eb5e9a6ce6b1c01884078b68263b8 --- /dev/null +++ b/tests/system-test/0-others/fsync.py @@ -0,0 +1,301 @@ +import datetime + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +PRIMARY_COL = "ts" + +INT_COL = "c1" +BINT_COL = "c2" +SINT_COL = "c3" +TINT_COL = "c4" +FLOAT_COL = "c5" +DOUBLE_COL = "c6" +BOOL_COL = "c7" + +BINARY_COL = "c8" +NCHAR_COL = "c9" +TS_COL = "c10" + +NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [ BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [ BOOL_COL, ] +TS_TYPE_COL = [ TS_COL, ] + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor()) + + def __kill_process(self, process_name): + killCmd = f"ps -ef|grep -w {process_name}| grep -v grep | awk '{{print $2}}' | xargs kill -TERM > /dev/null 2>&1" + + psCmd = f"ps -ef|grep -w {process_name}| grep -v grep | awk '{{print $2}}'" + while processID := subprocess.check_output(psCmd, shell=True): + os.system(killCmd) + time.sleep(1) + + def test_fsync_current(self): + wal_index = 0 + fsync_index = 0 + tdSql.query("show databases") + for i in range(tdSql.queryCols): + if tdSql.cursor.description[i][0] == "wal": + wal_index = i + if tdSql.cursor.description[i][0] == "fsync": + fsync_index = i + + tdSql.execute("drop database if exists db1") + tdSql.execute("create database db1 wal 1") + tdSql.query("show databases") + for i in range(tdSql.queryRows): + if tdSql.queryResult[i][0] == "db1": + tdSql.checkData(i, wal_index, 1) + + tdSql.execute("drop database if exists db1") + tdSql.execute("create 
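The deleted table_vnode_stop.sim above condenses to a failover check: with the database at replica 2, a stream table must keep answering after one dnode is stopped. A rough Python equivalent, assuming the harness's sh/exec.sh helper is available and reusing the script's rowNum of 20 (all names and paths here are placeholders):

    import os
    import time
    import taos

    cur = taos.connect(host="localhost").cursor()
    cur.execute("use db0")
    cur.execute("create table st_c1 as select count(*) from tb1 interval(1d)")

    os.system("sh/exec.sh -n dnode2 -s stop")   # take one replica down
    time.sleep(22)                              # one stream cycle, as in step4

    cur.execute("select * from st_c1")
    assert cur.fetchall()[0][1] == 20           # count survives the stop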
database db1 wal 2") + tdSql.query("show databases") + for i in range(tdSql.queryRows): + if tdSql.queryResult[i][0] == "db1": + tdSql.checkData(i, wal_index, 2) + + tdSql.execute("drop database if exists db1") + tdSql.execute("create database db1 fsync 0") + tdSql.query("show databases") + for i in range(tdSql.queryRows): + if tdSql.queryResult[i][0] == "db1": + tdSql.checkData(i, fsync_index, 0) + + tdSql.execute("drop database if exists db1") + tdSql.execute("create database db1 fsync 3000") + tdSql.query("show databases") + for i in range(tdSql.queryRows): + if tdSql.queryResult[i][0] == "db1": + tdSql.checkData(i, fsync_index, 3000) + + tdSql.execute("drop database if exists db1") + tdSql.execute("create database db1 fsync 180000") + tdSql.query("show databases") + for i in range(tdSql.queryRows): + if tdSql.queryResult[i][0] == "db1": + tdSql.checkData(i, fsync_index, 180000) + + + tdSql.execute("drop database if exists db1") + tdSql.execute("create database db1 wal 1 fsync 6000") + tdSql.query("show databases") + for i in range(tdSql.queryRows): + if tdSql.queryResult[i][0] == "db1": + tdSql.checkData(i, fsync_index, 6000) + tdSql.checkData(i, wal_index, 1) + + tdSql.execute("drop database if exists db1") + tdSql.execute("create database db1 wal 2 fsync 3000") + tdSql.query("show databases") + for i in range(tdSql.queryRows): + if tdSql.queryResult[i][0] == "db1": + tdSql.checkData(i, fsync_index, 3000) + tdSql.checkData(i, wal_index, 2) + + tdSql.execute("alter database db1 wal 1") + tdSql.query("show databases") + for i in range(tdSql.queryRows): + if tdSql.queryResult[i][0] == "db1": + tdSql.checkData(i, fsync_index, 3000) + tdSql.checkData(i, wal_index, 1) + + tdSql.execute("alter database db1 wal 2") + tdSql.query("show databases") + for i in range(tdSql.queryRows): + if tdSql.queryResult[i][0] == "db1": + tdSql.checkData(i, fsync_index, 3000) + tdSql.checkData(i, wal_index, 2) + + tdSql.execute("alter database db1 fsync 0") + tdSql.query("show databases") + for i in range(tdSql.queryRows): + if tdSql.queryResult[i][0] == "db1": + tdSql.checkData(i, fsync_index, 0) + tdSql.checkData(i, wal_index, 2) + + tdSql.execute("alter database db1 fsync 3000") + tdSql.query("show databases") + for i in range(tdSql.queryRows): + if tdSql.queryResult[i][0] == "db1": + tdSql.checkData(i, fsync_index, 3000) + tdSql.checkData(i, wal_index, 2) + + tdSql.execute("alter database db1 fsync 18000") + tdSql.query("show databases") + for i in range(tdSql.queryRows): + if tdSql.queryResult[i][0] == "db1": + tdSql.checkData(i, fsync_index, 18000) + tdSql.checkData(i, wal_index, 2) + + tdSql.execute("alter database db1 wal 1 fsync 3000") + tdSql.query("show databases") + for i in range(tdSql.queryRows): + if tdSql.queryResult[i][0] == "db1": + tdSql.checkData(i, fsync_index, 3000) + tdSql.checkData(i, wal_index, 1) + + tdSql.execute("drop database db1 ") + + @property + def fsync_create_err(self): + return [ + "create database db1 wal 0", + "create database db1 wal 3", + "create database db1 wal null", + "create database db1 wal true", + "create database db1 wal 1.1", + "create database db1 fsync -1", + "create database db1 fsync 180001", + "create database db1 fsync 10.111", + "create database db1 fsync true", + ] + + @property + def fsync_alter_err(self): + return [ + "alter database db1 wal 0", + "alter database db1 wal 3", + "alter database db1 wal null", + "alter database db1 wal true", + "alter database db1 wal 1.1", + "alter database db1 fsync -1", + "alter database db1 fsync 180001", + "alter 
database db1 fsync 10.111", + "alter database db1 fsync true", + ] + + def test_fsync_err(self): + for sql in self.fsync_create_err: + tdSql.error(sql) + tdSql.query("create database db1") + for sql in self.fsync_alter_err: + tdSql.error(sql) + tdSql.query("drop database db1") + + def all_test(self): + self.test_fsync_err() + self.test_fsync_current() + + # def __create_tb(self): + + # tdLog.printNoPrefix("==========step1:create table") + # create_stb_sql = f'''create table stb1( + # ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + # {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + # {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + # ) tags (t1 int) + # ''' + # create_ntb_sql = f'''create table t1( + # ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + # {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + # {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + # ) + # ''' + # tdSql.execute(create_stb_sql) + # tdSql.execute(create_ntb_sql) + + # for i in range(4): + # tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + # { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2} + + # def __insert_data(self, rows): + # now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + # for i in range(rows): + # tdSql.execute( + # f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + # ) + # tdSql.execute( + # f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + # ) + # tdSql.execute( + # f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + # ) + # tdSql.execute( + # f'''insert into ct1 values + # ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + # ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + # ''' + # ) + + # tdSql.execute( + # f'''insert into ct4 values + # ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + # ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + # ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + # ( + # { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + # { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + # ) + # ( + # { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + # { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + # ) + # ''' + # ) + + # tdSql.execute( + # f'''insert into ct2 values + # ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + # ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + # ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + # ( + # { 
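fsync_create_err and fsync_alter_err drive the same limits negatively: wal accepts only the integers 1 and 2, and fsync only integers in [0, 180000], so every listed statement must be rejected. tdSql.error() asserts exactly that; a minimal stand-in with the raw connector might look like this sketch:

    import taos

    def must_fail(cur, sql):
        """Assert that the server rejects the statement."""
        try:
            cur.execute(sql)
        except Exception:
            return
        raise AssertionError(f"statement unexpectedly succeeded: {sql}")

    cur = taos.connect(host="localhost").cursor()
    for sql in ("create database db1 wal 0",       # below the valid range
                "create database db1 wal 3",       # above the valid range
                "create database db1 fsync -1",
                "create database db1 fsync 180001"):
        must_fail(cur, sql)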
now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + # { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + # ) + # ( + # { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + # { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + # ) + # ''' + # ) + + # for i in range(rows): + # insert_data = f'''insert into t1 values + # ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + # "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + # ''' + # tdSql.execute(insert_data) + # tdSql.execute( + # f'''insert into t1 values + # ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + # ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + # ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + # ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + # { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + # "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + # ) + # ( + # { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + # { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + # "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + # ) + # ''' + # ) + + def run(self): + tdSql.prepare() + + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + # tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/0-others/taosShell.py b/tests/system-test/0-others/taosShell.py index f6dfe3f75c795ab8bd8eefc7b9d043d75854dc2e..9c8cd85b4654d19436496bc7adcdf81b761ab242 100644 --- a/tests/system-test/0-others/taosShell.py +++ b/tests/system-test/0-others/taosShell.py @@ -3,8 +3,12 @@ import taos import sys import time import socket -import pexpect import os +import platform +if platform.system().lower() == 'windows': + import wexpect as taosExpect +else: + import pexpect as taosExpect from util.log import * from util.sql import * @@ -15,7 +19,11 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key if len(key) == 0: tdLog.exit("taos test key is null!") - taosCmd = buildPath + '/build/bin/taos ' + if platform.system().lower() == 'windows': + taosCmd = buildPath + '\\build\\bin\\taos.exe ' + taosCmd = taosCmd.replace('\\','\\\\') + else: + taosCmd = buildPath + '/build/bin/taos ' if len(cfgDir) != 0: taosCmd = taosCmd + '-c ' + cfgDir @@ -36,25 +44,30 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key tdLog.info ("taos cmd: %s" % taosCmd) - child = pexpect.spawn(taosCmd, timeout=3) + child = taosExpect.spawn(taosCmd, timeout=3) #output = child.readline() #print (output.decode()) if len(expectString) != 0: - i = child.expect([expectString, pexpect.TIMEOUT, pexpect.EOF], timeout=6) + i = child.expect([expectString, taosExpect.TIMEOUT, taosExpect.EOF], timeout=6) else: - i = child.expect([pexpect.TIMEOUT, 
pexpect.EOF], timeout=6) + i = child.expect([taosExpect.TIMEOUT, taosExpect.EOF], timeout=6) - retResult = child.before.decode() + if platform.system().lower() == 'windows': + retResult = child.before + else: + retResult = child.before.decode() print(retResult) #print(child.after.decode()) if i == 0: print ('taos login success! Here can run sql, taos> ') if len(sqlString) != 0: child.sendline (sqlString) - w = child.expect(["Query OK", pexpect.TIMEOUT, pexpect.EOF], timeout=1) + w = child.expect(["Query OK", taosExpect.TIMEOUT, taosExpect.EOF], timeout=1) if w == 0: return "TAOS_OK" else: + print(1) + print(retResult) return "TAOS_FAIL" else: if key == 'A' or key1 == 'A' or key == 'C' or key1 == 'C' or key == 'V' or key1 == 'V': @@ -102,7 +115,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] @@ -275,11 +288,15 @@ class TDTestCase: pwd=os.getcwd() newDbName="dbf" sqlFile = pwd + "/0-others/sql.txt" - sql1 = "echo 'create database " + newDbName + "' > " + sqlFile - sql2 = "echo 'use " + newDbName + "' >> " + sqlFile - sql3 = "echo 'create table ntbf (ts timestamp, c binary(40))' >> " + sqlFile - sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile - sql5 = "echo 'show databases' >> " + sqlFile + sql1 = "echo create database " + newDbName + " > " + sqlFile + sql2 = "echo use " + newDbName + " >> " + sqlFile + if platform.system().lower() == 'windows': + sql3 = "echo create table ntbf (ts timestamp, c binary(40)) >> " + sqlFile + sql4 = "echo insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile + else: + sql3 = "echo 'create table ntbf (ts timestamp, c binary(40))' >> " + sqlFile + sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile + sql5 = "echo show databases >> " + sqlFile os.system(sql1) os.system(sql2) os.system(sql3) diff --git a/tests/system-test/0-others/taosShellError.py b/tests/system-test/0-others/taosShellError.py index 5f2f79982a58fe33e361f7c05926fc7c276f84d7..e00fe89461b2e8aeb7e9c545f0d40e8aa6363a50 100644 --- a/tests/system-test/0-others/taosShellError.py +++ b/tests/system-test/0-others/taosShellError.py @@ -3,7 +3,11 @@ import taos import sys import time import socket -import pexpect +import platform +if platform.system().lower() == 'windows': + import wexpect as taosExpect +else: + import pexpect as taosExpect import os from util.log import * @@ -15,7 +19,11 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key if len(key) == 0: tdLog.exit("taos test key is null!") - taosCmd = buildPath + '/build/bin/taos ' + if platform.system().lower() == 'windows': + taosCmd = buildPath + '\\build\\bin\\taos.exe ' + taosCmd = taosCmd.replace('\\','\\\\') + else: + taosCmd = buildPath + '/build/bin/taos ' if len(cfgDir) != 0: taosCmd = taosCmd + '-c ' + cfgDir @@ -36,23 +44,29 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key tdLog.info ("taos cmd: %s" % taosCmd) - child = pexpect.spawn(taosCmd, timeout=3) + child = taosExpect.spawn(taosCmd, timeout=3) #output = child.readline() 
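The change repeated across taosShell.py, taosShellError.py and taosShellNetChk.py is one cross-platform shim: pexpect does not run on Windows, so wexpect is imported under the shared alias taosExpect, the shell path switches to taos.exe with escaped backslashes, and child.before is decoded only on non-Windows, where pexpect returns bytes rather than str. The shim in isolation (the binary path is a placeholder):

    import platform

    if platform.system().lower() == 'windows':
        import wexpect as taosExpect
    else:
        import pexpect as taosExpect

    child = taosExpect.spawn('/usr/local/taos/bin/taos', timeout=3)
    i = child.expect(['taos>', taosExpect.TIMEOUT, taosExpect.EOF], timeout=6)

    # wexpect hands back str; pexpect hands back bytes.
    if platform.system().lower() == 'windows':
        retResult = child.before
    else:
        retResult = child.before.decode()
    print(i, retResult)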
#print (output.decode()) if len(expectString) != 0: - i = child.expect([expectString, pexpect.TIMEOUT, pexpect.EOF], timeout=6) + i = child.expect([expectString, taosExpect.TIMEOUT, taosExpect.EOF], timeout=6) else: - i = child.expect([pexpect.TIMEOUT, pexpect.EOF], timeout=6) + i = child.expect([taosExpect.TIMEOUT, taosExpect.EOF], timeout=6) - retResult = child.before.decode() + if platform.system().lower() == 'windows': + retResult = child.before + else: + retResult = child.before.decode() print("cmd return result:\n%s\n"%retResult) #print(child.after.decode()) if i == 0: print ('taos login success! Here can run sql, taos> ') if len(sqlString) != 0: child.sendline (sqlString) - w = child.expect(["Query OK", pexpect.TIMEOUT, pexpect.EOF], timeout=1) - retResult = child.before.decode() + w = child.expect(["Query OK", taosExpect.TIMEOUT, taosExpect.EOF], timeout=1) + if platform.system().lower() == 'windows': + retResult = child.before + else: + retResult = child.before.decode() if w == 0: return "TAOS_OK", retResult else: @@ -103,7 +117,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] @@ -216,11 +230,15 @@ class TDTestCase: pwd=os.getcwd() newDbName="dbf" sqlFile = pwd + "/0-others/sql.txt" - sql1 = "echo 'create database " + newDbName + "' > " + sqlFile - sql2 = "echo 'use " + newDbName + "' >> " + sqlFile - sql3 = "echo 'create table ntbf (ts timestamp, c binary(40)) no this item' >> " + sqlFile - sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile - sql5 = "echo 'show databases' >> " + sqlFile + sql1 = "echo create database " + newDbName + " > " + sqlFile + sql2 = "echo use " + newDbName + " >> " + sqlFile + if platform.system().lower() == 'windows': + sql3 = "echo create table ntbf (ts timestamp, c binary(40)) no this item >> " + sqlFile + sql4 = "echo insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile + else: + sql3 = "echo 'create table ntbf (ts timestamp, c binary(40)) no this item' >> " + sqlFile + sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile + sql5 = "echo show databases >> " + sqlFile os.system(sql1) os.system(sql2) os.system(sql3) diff --git a/tests/system-test/0-others/taosShellNetChk.py b/tests/system-test/0-others/taosShellNetChk.py index bbaeacf328fd5422ccd018a79ce6d9c632a370a9..c81d4af3c555a27b117e1551d6aef01820d3ee1c 100644 --- a/tests/system-test/0-others/taosShellNetChk.py +++ b/tests/system-test/0-others/taosShellNetChk.py @@ -3,7 +3,11 @@ import taos import sys import time import socket -import pexpect +import platform +if platform.system().lower() == 'windows': + import wexpect as taosExpect +else: + import pexpect as taosExpect import os from util.log import * @@ -15,7 +19,11 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key if len(key) == 0: tdLog.exit("taos test key is null!") - taosCmd = buildPath + '/build/bin/taos ' + if platform.system().lower() == 'windows': + taosCmd = buildPath + '\\build\\bin\\taos.exe ' + taosCmd = taosCmd.replace('\\','\\\\') + else: + taosCmd = 
buildPath + '/build/bin/taos ' if len(cfgDir) != 0: taosCmd = taosCmd + '-c ' + cfgDir @@ -36,23 +44,29 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key tdLog.info ("taos cmd: %s" % taosCmd) - child = pexpect.spawn(taosCmd, timeout=3) + child = taosExpect.spawn(taosCmd, timeout=3) #output = child.readline() #print (output.decode()) if len(expectString) != 0: - i = child.expect([expectString, pexpect.TIMEOUT, pexpect.EOF], timeout=6) + i = child.expect([expectString, taosExpect.TIMEOUT, taosExpect.EOF], timeout=6) else: - i = child.expect([pexpect.TIMEOUT, pexpect.EOF], timeout=6) + i = child.expect([taosExpect.TIMEOUT, taosExpect.EOF], timeout=6) - retResult = child.before.decode() + if platform.system().lower() == 'windows': + retResult = child.before + else: + retResult = child.before.decode() print("expect() return code: %d, content:\n %s\n"%(i, retResult)) #print(child.after.decode()) if i == 0: print ('taos login success! Here can run sql, taos> ') if len(sqlString) != 0: child.sendline (sqlString) - w = child.expect(["Query OK", pexpect.TIMEOUT, pexpect.EOF], timeout=1) - retResult = child.before.decode() + w = child.expect(["Query OK", taosExpect.TIMEOUT, taosExpect.EOF], timeout=1) + if platform.system().lower() == 'windows': + retResult = child.before + else: + retResult = child.before.decode() if w == 0: return "TAOS_OK", retResult else: @@ -103,7 +117,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] @@ -168,21 +182,33 @@ class TDTestCase: tdDnodes.stop(1) role = 'server' - taosCmd = 'nohup ' + buildPath + '/build/bin/taos -c ' + keyDict['c'] - taosCmd = taosCmd + ' -n ' + role + ' > /dev/null 2>&1 &' + if platform.system().lower() == 'windows': + taosCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\taos.exe -c ' + keyDict['c'] + taosCmd = taosCmd.replace('\\','\\\\') + taosCmd = taosCmd + ' -n ' + role + else: + taosCmd = 'nohup ' + buildPath + '/build/bin/taos -c ' + keyDict['c'] + taosCmd = taosCmd + ' -n ' + role + ' > /dev/null 2>&1 &' print (taosCmd) os.system(taosCmd) pktLen = '2000' pktNum = '10' role = 'client' - taosCmd = buildPath + '/build/bin/taos -c ' + keyDict['c'] + if platform.system().lower() == 'windows': + taosCmd = buildPath + '\\build\\bin\\taos.exe -c ' + keyDict['c'] + taosCmd = taosCmd.replace('\\','\\\\') + else: + taosCmd = buildPath + '/build/bin/taos -c ' + keyDict['c'] taosCmd = taosCmd + ' -n ' + role + ' -l ' + pktLen + ' -N ' + pktNum print (taosCmd) - child = pexpect.spawn(taosCmd, timeout=3) - i = child.expect([pexpect.TIMEOUT, pexpect.EOF], timeout=6) + child = taosExpect.spawn(taosCmd, timeout=3) + i = child.expect([taosExpect.TIMEOUT, taosExpect.EOF], timeout=6) - retResult = child.before.decode() + if platform.system().lower() == 'windows': + retResult = child.before + else: + retResult = child.before.decode() print("expect() return code: %d, content:\n %s\n"%(i, retResult)) #print(child.after.decode()) if i == 0: @@ -195,7 +221,10 @@ class TDTestCase: else: tdLog.exit('taos -n client fail!') - os.system('pkill taos') + if platform.system().lower() == 'windows': + os.system('ps -a | grep taos | awk \'{print $2}\' | xargs kill -9') + else: + os.system('pkill taos') def stop(self): tdSql.close() diff --git 
a/tests/system-test/0-others/taosdMonitor.py b/tests/system-test/0-others/taosdMonitor.py index a3d3b052047faa12618a0b68846518269c9de3f5..657979658e5b048e8d51d0bf8c67bada5f321402 100644 --- a/tests/system-test/0-others/taosdMonitor.py +++ b/tests/system-test/0-others/taosdMonitor.py @@ -2,7 +2,7 @@ import taos import sys import time import socket -import pexpect +# import pexpect import os import http.server import gzip diff --git a/tests/system-test/0-others/telemetry.py b/tests/system-test/0-others/telemetry.py index 3ab39f9e7bb14b40f7caaa2b6f3bff43869c1e21..203f87c085fe91a9a75cc4176065a893fc29cf1e 100644 --- a/tests/system-test/0-others/telemetry.py +++ b/tests/system-test/0-others/telemetry.py @@ -2,7 +2,7 @@ import taos import sys import time import socket -import pexpect +# import pexpect import os import http.server import gzip diff --git a/tests/system-test/0-others/udfTest.py b/tests/system-test/0-others/udfTest.py index 679b41509891d1efe92507a81f7add51b9f76253..46d0a6968875a5e6c484c932abb41946f56bc8ee 100644 --- a/tests/system-test/0-others/udfTest.py +++ b/tests/system-test/0-others/udfTest.py @@ -134,7 +134,7 @@ class TDTestCase: def create_udf_function(self): - for i in range(10): + for i in range(5): # create scalar functions tdSql.execute("create function udf1 as '/tmp/udf/libudf1.so' outputtype int bufSize 8;") @@ -644,16 +644,12 @@ class TDTestCase: self.create_udf_function() self.basic_udf_query() self.loop_kill_udfd() - - self.unexpected_create() tdSql.execute(" drop function udf1 ") tdSql.execute(" drop function udf2 ") self.create_udf_function() time.sleep(2) self.basic_udf_query() self.test_function_name() - self.restart_taosd_query_udf() - def stop(self): diff --git a/tests/system-test/0-others/udf_create.py b/tests/system-test/0-others/udf_create.py new file mode 100644 index 0000000000000000000000000000000000000000..e2c6e3c10bd1520c58c4400fd58c741d2904a420 --- /dev/null +++ b/tests/system-test/0-others/udf_create.py @@ -0,0 +1,654 @@ +from distutils.log import error +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +import subprocess + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def prepare_udf_so(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + print(projPath) + + libudf1 = subprocess.Popen('find %s -name "libudf1.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + libudf2 = subprocess.Popen('find %s -name "libudf2.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + os.system("mkdir /tmp/udf/") + os.system("cp %s /tmp/udf/ "%libudf1.replace("\n" ,"")) + os.system("cp %s /tmp/udf/ "%libudf2.replace("\n" 
,"")) + + + def prepare_data(self): + + tdSql.execute("drop database if exists db ") + tdSql.execute("create database if not exists db days 300") + tdSql.execute("use db") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + for i in range(9): + tdSql.execute( + f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute( + f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + + tdSql.execute( + f'''insert into t1 values + ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) + ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) + ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a ) + ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a ) + ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a ) + ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a ) + ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ''' + ) + + tdSql.execute("create table tb (ts timestamp , num1 int , num2 int, num3 double , num4 binary(30))") + tdSql.execute( + f'''insert into tb values + ( '2020-04-21 01:01:01.000', NULL, 1, 1, "binary1" ) + ( '2020-10-21 01:01:01.000', 1, 1, 1.11, "binary1" ) + ( '2020-12-31 01:01:01.000', 2, 22222, 22, "binary1" ) + ( '2021-01-01 01:01:06.000', 3, 33333, 33, "binary1" ) + ( '2021-05-07 
01:01:10.000', 4, 44444, 44, "binary1" ) + ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, "binary1" ) + ( '2021-09-30 01:01:16.000', 5, 55555, 55, "binary1" ) + ( '2022-02-01 01:01:20.000', 6, 66666, 66, "binary1" ) + ( '2022-10-28 01:01:26.000', 0, 00000, 00, "binary1" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -88, "binary1" ) + ( '2022-12-31 01:01:36.000', 9, -9999999, -99, "binary1" ) + ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, "binary1" ) + ''' + ) + + # udf functions with join + ts_start = 1652517451000 + tdSql.execute("create stable st (ts timestamp , c1 int , c2 int ,c3 double ,c4 double ) tags(ind int)") + tdSql.execute("create table sub1 using st tags(1)") + tdSql.execute("create table sub2 using st tags(2)") + + for i in range(10): + ts = ts_start + i *1000 + tdSql.execute(" insert into sub1 values({} , {},{},{},{})".format(ts,i ,i*10,i*100.0,i*1000.0)) + tdSql.execute(" insert into sub2 values({} , {},{},{},{})".format(ts,i ,i*10,i*100.0,i*1000.0)) + + + def create_udf_function(self): + + for i in range(5): + # create scalar functions + tdSql.execute("create function udf1 as '/tmp/udf/libudf1.so' outputtype int bufSize 8;") + + # create aggregate functions + + tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8;") + + functions = tdSql.getResult("show functions") + function_nums = len(functions) + if function_nums == 2: + tdLog.info("create two udf functions success ") + + # drop functions + + tdSql.execute("drop function udf1") + tdSql.execute("drop function udf2") + + functions = tdSql.getResult("show functions") + for function in functions: + if "udf1" in function[0] or "udf2" in function[0]: + tdLog.info("drop udf functions failed ") + tdLog.exit("drop udf functions failed") + + tdLog.info("drop two udf functions success ") + + # create scalar functions + tdSql.execute("create function udf1 as '/tmp/udf/libudf1.so' outputtype int bufSize 8;") + + # create aggregate functions + + tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8;") + + functions = tdSql.getResult("show functions") + function_nums = len(functions) + if function_nums == 2: + tdLog.info("create two udf functions success ") + + def basic_udf_query(self): + + # scalar functions + + tdSql.execute("use db ") + tdSql.query("select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb") + tdSql.checkData(0,0,None) + tdSql.checkData(0,1,None) + tdSql.checkData(0,2,1) + tdSql.checkData(0,3,88) + tdSql.checkData(0,4,1.000000000) + tdSql.checkData(0,5,88) + tdSql.checkData(0,6,"binary1") + tdSql.checkData(0,7,88) + + tdSql.checkData(3,0,3) + tdSql.checkData(3,1,88) + tdSql.checkData(3,2,33333) + tdSql.checkData(3,3,88) + tdSql.checkData(3,4,33.000000000) + tdSql.checkData(3,5,88) + tdSql.checkData(3,6,"binary1") + tdSql.checkData(3,7,88) + + tdSql.checkData(11,0,None) + tdSql.checkData(11,1,None) + tdSql.checkData(11,2,None) + tdSql.checkData(11,3,None) + tdSql.checkData(11,4,None) + tdSql.checkData(11,5,None) + tdSql.checkData(11,6,"binary1") + tdSql.checkData(11,7,88) + + tdSql.query("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1") + tdSql.checkData(0,0,None) + tdSql.checkData(0,1,None) + tdSql.checkData(0,2,None) + tdSql.checkData(0,3,None) + tdSql.checkData(0,4,None) + tdSql.checkData(0,5,None) + tdSql.checkData(0,6,None) + tdSql.checkData(0,7,None) + + tdSql.checkData(20,0,8) + tdSql.checkData(20,1,88) + tdSql.checkData(20,2,88888) + 
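+ # note: udf1 is a scalar UDF that returns the constant 88 for every non-NULL input (and NULL for NULL inputs), which is why each source column is paired with 88 in these checks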
tdSql.checkData(20,3,88) + tdSql.checkData(20,4,888) + tdSql.checkData(20,5,88) + tdSql.checkData(20,6,88) + tdSql.checkData(20,7,88) + + + # aggregate functions + tdSql.query("select udf2(num1) ,udf2(num2), udf2(num3) from tb") + tdSql.checkData(0,0,15.362291496) + tdSql.checkData(0,1,10000949.553189287) + tdSql.checkData(0,2,168.633425216) + + # Arithmetic compute + tdSql.query("select udf2(num1)+100 ,udf2(num2)-100, udf2(num3)*100 ,udf2(num3)/100 from tb") + tdSql.checkData(0,0,115.362291496) + tdSql.checkData(0,1,10000849.553189287) + tdSql.checkData(0,2,16863.342521576) + tdSql.checkData(0,3,1.686334252) + + tdSql.query("select udf2(c1) ,udf2(c6) from stb1 ") + tdSql.checkData(0,0,25.514701644) + tdSql.checkData(0,1,265.247614504) + + tdSql.query("select udf2(c1)+100 ,udf2(c6)-100 ,udf2(c1)*100 ,udf2(c6)/100 from stb1 ") + tdSql.checkData(0,0,125.514701644) + tdSql.checkData(0,1,165.247614504) + tdSql.checkData(0,2,2551.470164435) + tdSql.checkData(0,3,2.652476145) + + # # bug for crash when query sub table + tdSql.query("select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from ct1") + tdSql.checkData(0,0,378.215547010) + tdSql.checkData(0,1,353.808067460) + tdSql.checkData(0,2,2114.237451187) + tdSql.checkData(0,3,2.125468151) + + tdSql.query("select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from stb1 ") + tdSql.checkData(0,0,490.358032462) + tdSql.checkData(0,1,400.460106627) + tdSql.checkData(0,2,2551.470164435) + tdSql.checkData(0,3,2.652476145) + + + # regular table with aggregate functions + + tdSql.error("select udf1(num1) , count(num1) from tb;") + tdSql.error("select udf1(num1) , avg(num1) from tb;") + tdSql.error("select udf1(num1) , twa(num1) from tb;") + tdSql.error("select udf1(num1) , irate(num1) from tb;") + tdSql.error("select udf1(num1) , sum(num1) from tb;") + tdSql.error("select udf1(num1) , stddev(num1) from tb;") + tdSql.error("select udf1(num1) , mode(num1) from tb;") + tdSql.error("select udf1(num1) , HYPERLOGLOG(num1) from tb;") + # stable + tdSql.error("select udf1(c1) , count(c1) from stb1;") + tdSql.error("select udf1(c1) , avg(c1) from stb1;") + tdSql.error("select udf1(c1) , twa(c1) from stb1;") + tdSql.error("select udf1(c1) , irate(c1) from stb1;") + tdSql.error("select udf1(c1) , sum(c1) from stb1;") + tdSql.error("select udf1(c1) , stddev(c1) from stb1;") + tdSql.error("select udf1(c1) , mode(c1) from stb1;") + tdSql.error("select udf1(c1) , HYPERLOGLOG(c1) from stb1;") + + # regular table with select functions + + tdSql.query("select udf1(num1) , max(num1) from tb;") + tdSql.checkRows(1) + tdSql.query("select floor(num1) , max(num1) from tb;") + tdSql.checkRows(1) + tdSql.query("select udf1(num1) , min(num1) from tb;") + tdSql.checkRows(1) + tdSql.query("select ceil(num1) , min(num1) from tb;") + tdSql.checkRows(1) + tdSql.error("select udf1(num1) , first(num1) from tb;") + + tdSql.error("select abs(num1) , first(num1) from tb;") + + tdSql.error("select udf1(num1) , last(num1) from tb;") + + tdSql.error("select round(num1) , last(num1) from tb;") + + tdSql.query("select udf1(num1) , top(num1,1) from tb;") + tdSql.checkRows(1) + tdSql.query("select udf1(num1) , bottom(num1,1) from tb;") + tdSql.checkRows(1) + tdSql.error("select udf1(num1) , last_row(num1) from tb;") + + tdSql.error("select round(num1) , last_row(num1) from tb;") + + + # stable + tdSql.query("select udf1(c1) , max(c1) from stb1;") + tdSql.checkRows(1) + tdSql.query("select abs(c1) , max(c1) from stb1;") + tdSql.checkRows(1) + tdSql.query("select udf1(c1) , 
min(c1) from stb1;") + tdSql.checkRows(1) + tdSql.query("select floor(c1) , min(c1) from stb1;") + tdSql.checkRows(1) + tdSql.error("select udf1(c1) , first(c1) from stb1;") + + tdSql.error("select udf1(c1) , last(c1) from stb1;") + + tdSql.query("select udf1(c1) , top(c1 ,1) from stb1;") + tdSql.checkRows(1) + tdSql.query("select abs(c1) , top(c1 ,1) from stb1;") + tdSql.checkRows(1) + tdSql.query("select udf1(c1) , bottom(c1,1) from stb1;") + tdSql.checkRows(1) + tdSql.query("select ceil(c1) , bottom(c1,1) from stb1;") + tdSql.checkRows(1) + + tdSql.error("select udf1(c1) , last_row(c1) from stb1;") + tdSql.error("select ceil(c1) , last_row(c1) from stb1;") + + # regular table with compute functions + + tdSql.query("select udf1(num1) , abs(num1) from tb;") + tdSql.checkRows(12) + tdSql.query("select floor(num1) , abs(num1) from tb;") + tdSql.checkRows(12) + + # # bug need fix + + #tdSql.query("select udf1(num1) , csum(num1) from tb;") + #tdSql.checkRows(9) + #tdSql.query("select ceil(num1) , csum(num1) from tb;") + #tdSql.checkRows(9) + #tdSql.query("select udf1(c1) , csum(c1) from stb1;") + #tdSql.checkRows(22) + #tdSql.query("select floor(c1) , csum(c1) from stb1;") + #tdSql.checkRows(22) + + # stable with compute functions + tdSql.query("select udf1(c1) , abs(c1) from stb1;") + tdSql.checkRows(25) + tdSql.query("select abs(c1) , ceil(c1) from stb1;") + tdSql.checkRows(25) + + # nest query + tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from stb1 order by ts;") + tdSql.checkRows(25) + tdSql.checkData(0,0,None) + tdSql.checkData(0,1,None) + tdSql.checkData(1,0,88) + tdSql.checkData(1,1,8) + + tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;") + tdSql.checkRows(13) + tdSql.checkData(0,0,88) + tdSql.checkData(0,1,8) + tdSql.checkData(1,0,88) + tdSql.checkData(1,1,7) + + # bug fix for crash + # order by udf function result + for _ in range(50): + tdSql.query("select udf2(c1) from stb1 group by 1-udf1(c1)") + print(tdSql.queryResult) + + # udf functions with filter + + tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from stb1 where c1 is null order by ts;") + tdSql.checkRows(3) + tdSql.checkData(0,0,None) + tdSql.checkData(0,1,None) + + tdSql.query("select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts") + tdSql.checkRows(3) + tdSql.checkData(0,0,9) + tdSql.checkData(0,1,88) + tdSql.checkData(0,2,-99.990000000) + tdSql.checkData(0,3,88) + + tdSql.query("select sub1.c1, sub2.c2 from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") + tdSql.checkData(0,0,0) + tdSql.checkData(0,1,0) + tdSql.checkData(1,0,1) + tdSql.checkData(1,1,10) + + tdSql.query("select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") + tdSql.checkData(0,0,88) + tdSql.checkData(0,1,88) + tdSql.checkData(1,0,88) + tdSql.checkData(1,1,88) + + tdSql.query("select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") + tdSql.checkData(0,0,0) + tdSql.checkData(0,1,88) + tdSql.checkData(0,2,0) + tdSql.checkData(0,3,88) + tdSql.checkData(1,0,1) + tdSql.checkData(1,1,88) + tdSql.checkData(1,2,10) + tdSql.checkData(1,3,88) + + tdSql.query("select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") + tdSql.checkData(0,0,16.881943016) + tdSql.checkData(0,1,168.819430161) + tdSql.error("select sub1.c1 , udf2(sub1.c1), sub2.c2 ,udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") + + # udf functions with group by + 
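+ # a scalar UDF may appear both in the select list and in the GROUP BY expression; grouping stb1 by udf1(c1) collapses the rows to its two distinct outputs (NULL and 88), hence the checkRows(2) below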
tdSql.query("select udf1(c1) from ct1 group by c1") + tdSql.checkRows(10) + tdSql.query("select udf1(c1) from stb1 group by c1") + tdSql.checkRows(11) + tdSql.query("select c1,c2, udf1(c1,c2) from ct1 group by c1,c2") + tdSql.checkRows(10) + tdSql.query("select c1,c2, udf1(c1,c2) from stb1 group by c1,c2") + tdSql.checkRows(11) + + tdSql.query("select udf2(c1) from ct1 group by c1") + tdSql.checkRows(10) + tdSql.query("select udf2(c1) from stb1 group by c1") + tdSql.checkRows(11) + tdSql.query("select c1,c2, udf2(c1,c6) from ct1 group by c1,c2") + tdSql.checkRows(10) + tdSql.query("select c1,c2, udf2(c1,c6) from stb1 group by c1,c2") + tdSql.checkRows(11) + tdSql.query("select udf2(c1) from stb1 group by udf1(c1)") + tdSql.checkRows(2) + tdSql.query("select udf2(c1) from stb1 group by floor(c1)") + tdSql.checkRows(11) + + # udf mix with order by + tdSql.query("select udf2(c1) from stb1 group by floor(c1) order by udf2(c1)") + tdSql.checkRows(11) + + + def multi_cols_udf(self): + tdSql.query("select num1,num2,num3,udf1(num1,num2,num3) from tb") + tdSql.checkData(0,0,None) + tdSql.checkData(0,1,1) + tdSql.checkData(0,2,1.000000000) + tdSql.checkData(0,3,None) + tdSql.checkData(1,0,1) + tdSql.checkData(1,1,1) + tdSql.checkData(1,2,1.110000000) + tdSql.checkData(1,3,88) + + tdSql.query("select c1,c6,udf1(c1,c6) from stb1 order by ts") + tdSql.checkData(1,0,8) + tdSql.checkData(1,1,88.880000000) + tdSql.checkData(1,2,88) + + tdSql.query("select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;") + tdSql.checkRows(22) + + tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") + tdSql.checkData(0,0,169.661427555) + tdSql.checkData(0,1,169.661427555) + + def try_query_sql(self): + udf1_sqls = [ + "select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb" , + "select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1" , + "select udf1(num1) , max(num1) from tb;" , + "select udf1(num1) , min(num1) from tb;" , + #"select udf1(num1) , top(num1,1) from tb;" , + #"select udf1(num1) , bottom(num1,1) from tb;" , + "select udf1(c1) , max(c1) from stb1;" , + "select udf1(c1) , min(c1) from stb1;" , + #"select udf1(c1) , top(c1 ,1) from stb1;" , + #"select udf1(c1) , bottom(c1,1) from stb1;" , + "select udf1(num1) , abs(num1) from tb;" , + #"select udf1(num1) , csum(num1) from tb;" , + #"select udf1(c1) , csum(c1) from stb1;" , + "select udf1(c1) , abs(c1) from stb1;" , + "select abs(udf1(c1)) , abs(ceil(c1)) from stb1 order by ts;" , + "select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;" , + "select abs(udf1(c1)) , abs(ceil(c1)) from stb1 where c1 is null order by ts;" , + "select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts" , + "select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" , + "select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" , + "select udf1(c1) from ct1 group by c1" , + "select udf1(c1) from stb1 group by c1" , + "select c1,c2, udf1(c1,c2) from ct1 group by c1,c2" , + "select c1,c2, udf1(c1,c2) from stb1 group by c1,c2" , + "select num1,num2,num3,udf1(num1,num2,num3) from tb" , + "select c1,c6,udf1(c1,c6) from stb1 order by ts" , + "select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;" + ] + udf2_sqls = ["select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 
where sub1.ts=sub2.ts and sub1.c1 is not null" , + "select udf2(c1) from stb1 group by 1-udf1(c1)" , + "select udf2(num1) ,udf2(num2), udf2(num3) from tb" , + "select udf2(num1)+100 ,udf2(num2)-100, udf2(num3)*100 ,udf2(num3)/100 from tb" , + "select udf2(c1) ,udf2(c6) from stb1 " , + "select udf2(c1)+100 ,udf2(c6)-100 ,udf2(c1)*100 ,udf2(c6)/100 from stb1 " , + "select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from ct1" , + "select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from stb1 " , + "select udf2(c1) from ct1 group by c1" , + "select udf2(c1) from stb1 group by c1" , + "select c1,c2, udf2(c1,c6) from ct1 group by c1,c2" , + "select c1,c2, udf2(c1,c6) from stb1 group by c1,c2" , + "select udf2(c1) from stb1 group by udf1(c1)" , + "select udf2(c1) from stb1 group by floor(c1)" , + "select udf2(c1) from stb1 group by floor(c1) order by udf2(c1)" , + + "select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" , + "select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" , + "select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" , + "select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null"] + + return udf1_sqls ,udf2_sqls + + + + def unexpected_create(self): + + tdLog.info(" create function without bufSize ") + tdSql.query("drop function udf1 ") + tdSql.query("drop function udf2 ") + + # create function without buffer + tdSql.execute("create function udf1 as '/tmp/udf/libudf1.so' outputtype int") + tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double") + udf1_sqls ,udf2_sqls = self.try_query_sql() + + for scalar_sql in udf1_sqls: + tdSql.query(scalar_sql) + for aggregate_sql in udf2_sqls: + tdSql.error(aggregate_sql) + + # create function without aggregate + + tdLog.info(" create function without aggregate ") + tdSql.query("drop function udf1 ") + tdSql.query("drop function udf2 ") + + # create with the scalar/aggregate flags swapped + tdSql.execute("create aggregate function udf1 as '/tmp/udf/libudf1.so' outputtype int bufSize 8 ") + tdSql.execute("create function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8") + udf1_sqls ,udf2_sqls = self.try_query_sql() + + for scalar_sql in udf1_sqls: + tdSql.error(scalar_sql) + for aggregate_sql in udf2_sqls: + tdSql.error(aggregate_sql) + + tdSql.execute(" create function db as '/tmp/udf/libudf1.so' outputtype int bufSize 8 ") + tdSql.execute(" create aggregate function test as '/tmp/udf/libudf1.so' outputtype int bufSize 8 ") + tdSql.error(" select db(c1) from stb1 ") + tdSql.error(" select db(c1,c6), db(c6) from stb1 ") + tdSql.error(" select db(num1,num2), db(num1) from tb ") + tdSql.error(" select test(c1) from stb1 ") + tdSql.error(" select test(c1,c6), test(c6) from stb1 ") + tdSql.error(" select test(num1,num2), test(num1) from tb ") + + + + def loop_kill_udfd(self): + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + + cfgPath = buildPath + "/../sim/dnode1/cfg" + udfdPath = buildPath +'/build/bin/udfd' + + for i in range(3): + + tdLog.info(" loop restart udfd %d-th" % i) + + tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") + tdSql.checkData(0,0,169.661427555) + 
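+ # baseline result while udfd is alive; after udfd is killed below, the test relies on taosd respawning it, so the same query is expected to return identical values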
tdSql.checkData(0,1,169.661427555) + # stop udfd cmds + get_processID = "ps -ef | grep -w udfd | grep -v grep| grep -v defunct | awk '{print $2}'" + processID = subprocess.check_output(get_processID, shell=True).decode("utf-8") + stop_udfd = " kill -9 %s" % processID + os.system(stop_udfd) + + time.sleep(2) + + tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") + tdSql.checkData(0,0,169.661427555) + tdSql.checkData(0,1,169.661427555) + + # # start udfd cmds + # start_udfd = "nohup " + udfdPath + ' -c ' + cfgPath + " > /dev/null 2>&1 &" + # tdLog.info("start udfd : %s " % start_udfd) + + def test_function_name(self): + tdLog.info(" built-in function names and keywords can not be used as UDF names ") + tdSql.execute(" drop function udf1 ") + tdSql.execute(" drop function udf2 ") + tdSql.error("create function max as '/tmp/udf/libudf1.so' outputtype int bufSize 8") + tdSql.error("create aggregate function sum as '/tmp/udf/libudf2.so' outputtype double bufSize 8") + tdSql.error("create function max as '/tmp/udf/libudf1.so' outputtype int bufSize 8") + tdSql.error("create aggregate function sum as '/tmp/udf/libudf2.so' outputtype double bufSize 8") + tdSql.error("create aggregate function tbname as '/tmp/udf/libudf2.so' outputtype double bufSize 8") + tdSql.error("create aggregate function function as '/tmp/udf/libudf2.so' outputtype double bufSize 8") + tdSql.error("create aggregate function stable as '/tmp/udf/libudf2.so' outputtype double bufSize 8") + tdSql.error("create aggregate function union as '/tmp/udf/libudf2.so' outputtype double bufSize 8") + tdSql.error("create aggregate function 123 as '/tmp/udf/libudf2.so' outputtype double bufSize 8") + tdSql.error("create aggregate function 123db as '/tmp/udf/libudf2.so' outputtype double bufSize 8") + tdSql.error("create aggregate function mnode as '/tmp/udf/libudf2.so' outputtype double bufSize 8") + + def restart_taosd_query_udf(self): + + self.create_udf_function() + + for i in range(5): + tdLog.info(" this is the %d-th taosd restart " %i) + tdSql.execute("use db ") + tdSql.query("select count(*) from stb1") + tdSql.checkRows(1) + tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") + tdSql.checkData(0,0,169.661427555) + tdSql.checkData(0,1,169.661427555) + tdDnodes.stop(1) + tdDnodes.start(1) + time.sleep(2) + + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring + + print(" env is ok for all ") + self.prepare_udf_so() + self.prepare_data() + self.create_udf_function() + self.basic_udf_query() + self.unexpected_create() + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/0-others/udf_restart_taosd.py b/tests/system-test/0-others/udf_restart_taosd.py new file mode 100644 index 0000000000000000000000000000000000000000..24d3b5a9c3cf702c4839e83ff02794f5bf08fcb5 --- /dev/null +++ b/tests/system-test/0-others/udf_restart_taosd.py @@ -0,0 +1,654 @@ +from distutils.log import error +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +import subprocess + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = 
os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def prepare_udf_so(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + print(projPath) + + libudf1 = subprocess.Popen('find %s -name "libudf1.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + libudf2 = subprocess.Popen('find %s -name "libudf2.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + os.system("mkdir /tmp/udf/") + os.system("cp %s /tmp/udf/ "%libudf1.replace("\n" ,"")) + os.system("cp %s /tmp/udf/ "%libudf2.replace("\n" ,"")) + + + def prepare_data(self): + + tdSql.execute("drop database if exists db ") + tdSql.execute("create database if not exists db days 300") + tdSql.execute("use db") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + for i in range(9): + tdSql.execute( + f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute( + f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + + tdSql.execute( + f'''insert into t1 values + ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) + ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) + ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a ) + ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a ) + ( 
'2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a ) + ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a ) + ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ''' + ) + + tdSql.execute("create table tb (ts timestamp , num1 int , num2 int, num3 double , num4 binary(30))") + tdSql.execute( + f'''insert into tb values + ( '2020-04-21 01:01:01.000', NULL, 1, 1, "binary1" ) + ( '2020-10-21 01:01:01.000', 1, 1, 1.11, "binary1" ) + ( '2020-12-31 01:01:01.000', 2, 22222, 22, "binary1" ) + ( '2021-01-01 01:01:06.000', 3, 33333, 33, "binary1" ) + ( '2021-05-07 01:01:10.000', 4, 44444, 44, "binary1" ) + ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, "binary1" ) + ( '2021-09-30 01:01:16.000', 5, 55555, 55, "binary1" ) + ( '2022-02-01 01:01:20.000', 6, 66666, 66, "binary1" ) + ( '2022-10-28 01:01:26.000', 0, 00000, 00, "binary1" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -88, "binary1" ) + ( '2022-12-31 01:01:36.000', 9, -9999999, -99, "binary1" ) + ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, "binary1" ) + ''' + ) + + # udf functions with join + ts_start = 1652517451000 + tdSql.execute("create stable st (ts timestamp , c1 int , c2 int ,c3 double ,c4 double ) tags(ind int)") + tdSql.execute("create table sub1 using st tags(1)") + tdSql.execute("create table sub2 using st tags(2)") + + for i in range(10): + ts = ts_start + i *1000 + tdSql.execute(" insert into sub1 values({} , {},{},{},{})".format(ts,i ,i*10,i*100.0,i*1000.0)) + tdSql.execute(" insert into sub2 values({} , {},{},{},{})".format(ts,i ,i*10,i*100.0,i*1000.0)) + + + def create_udf_function(self): + + for i in range(5): + # create scalar functions + tdSql.execute("create function udf1 as '/tmp/udf/libudf1.so' outputtype int bufSize 8;") + + # create aggregate functions + + tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8;") + + functions = tdSql.getResult("show functions") + function_nums = len(functions) + if function_nums == 2: + tdLog.info("create two udf functions success ") + + # drop functions + + tdSql.execute("drop function udf1") + tdSql.execute("drop function udf2") + + functions = tdSql.getResult("show functions") + for function in functions: + if "udf1" in function[0] or "udf2" in function[0]: + tdLog.info("drop udf functions failed ") + tdLog.exit("drop udf functions failed") + + tdLog.info("drop two udf functions success ") + + # create scalar functions + tdSql.execute("create function udf1 as '/tmp/udf/libudf1.so' outputtype int bufSize 8;") + + # create aggregate functions + + tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8;") + + functions = tdSql.getResult("show functions") + function_nums = len(functions) + if function_nums == 2: + tdLog.info("create two udf functions success ") + + def basic_udf_query(self): + + # scalar functions + + tdSql.execute("use db ") + tdSql.query("select num1 , udf1(num1) ,num2 ,udf1(num2),num3 
,udf1(num3),num4 ,udf1(num4) from tb") + tdSql.checkData(0,0,None) + tdSql.checkData(0,1,None) + tdSql.checkData(0,2,1) + tdSql.checkData(0,3,88) + tdSql.checkData(0,4,1.000000000) + tdSql.checkData(0,5,88) + tdSql.checkData(0,6,"binary1") + tdSql.checkData(0,7,88) + + tdSql.checkData(3,0,3) + tdSql.checkData(3,1,88) + tdSql.checkData(3,2,33333) + tdSql.checkData(3,3,88) + tdSql.checkData(3,4,33.000000000) + tdSql.checkData(3,5,88) + tdSql.checkData(3,6,"binary1") + tdSql.checkData(3,7,88) + + tdSql.checkData(11,0,None) + tdSql.checkData(11,1,None) + tdSql.checkData(11,2,None) + tdSql.checkData(11,3,None) + tdSql.checkData(11,4,None) + tdSql.checkData(11,5,None) + tdSql.checkData(11,6,"binary1") + tdSql.checkData(11,7,88) + + tdSql.query("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1") + tdSql.checkData(0,0,None) + tdSql.checkData(0,1,None) + tdSql.checkData(0,2,None) + tdSql.checkData(0,3,None) + tdSql.checkData(0,4,None) + tdSql.checkData(0,5,None) + tdSql.checkData(0,6,None) + tdSql.checkData(0,7,None) + + tdSql.checkData(20,0,8) + tdSql.checkData(20,1,88) + tdSql.checkData(20,2,88888) + tdSql.checkData(20,3,88) + tdSql.checkData(20,4,888) + tdSql.checkData(20,5,88) + tdSql.checkData(20,6,88) + tdSql.checkData(20,7,88) + + + # aggregate functions + tdSql.query("select udf2(num1) ,udf2(num2), udf2(num3) from tb") + tdSql.checkData(0,0,15.362291496) + tdSql.checkData(0,1,10000949.553189287) + tdSql.checkData(0,2,168.633425216) + + # Arithmetic compute + tdSql.query("select udf2(num1)+100 ,udf2(num2)-100, udf2(num3)*100 ,udf2(num3)/100 from tb") + tdSql.checkData(0,0,115.362291496) + tdSql.checkData(0,1,10000849.553189287) + tdSql.checkData(0,2,16863.342521576) + tdSql.checkData(0,3,1.686334252) + + tdSql.query("select udf2(c1) ,udf2(c6) from stb1 ") + tdSql.checkData(0,0,25.514701644) + tdSql.checkData(0,1,265.247614504) + + tdSql.query("select udf2(c1)+100 ,udf2(c6)-100 ,udf2(c1)*100 ,udf2(c6)/100 from stb1 ") + tdSql.checkData(0,0,125.514701644) + tdSql.checkData(0,1,165.247614504) + tdSql.checkData(0,2,2551.470164435) + tdSql.checkData(0,3,2.652476145) + + # # bug for crash when query sub table + tdSql.query("select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from ct1") + tdSql.checkData(0,0,378.215547010) + tdSql.checkData(0,1,353.808067460) + tdSql.checkData(0,2,2114.237451187) + tdSql.checkData(0,3,2.125468151) + + tdSql.query("select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from stb1 ") + tdSql.checkData(0,0,490.358032462) + tdSql.checkData(0,1,400.460106627) + tdSql.checkData(0,2,2551.470164435) + tdSql.checkData(0,3,2.652476145) + + + # regular table with aggregate functions + + tdSql.error("select udf1(num1) , count(num1) from tb;") + tdSql.error("select udf1(num1) , avg(num1) from tb;") + tdSql.error("select udf1(num1) , twa(num1) from tb;") + tdSql.error("select udf1(num1) , irate(num1) from tb;") + tdSql.error("select udf1(num1) , sum(num1) from tb;") + tdSql.error("select udf1(num1) , stddev(num1) from tb;") + tdSql.error("select udf1(num1) , mode(num1) from tb;") + tdSql.error("select udf1(num1) , HYPERLOGLOG(num1) from tb;") + # stable + tdSql.error("select udf1(c1) , count(c1) from stb1;") + tdSql.error("select udf1(c1) , avg(c1) from stb1;") + tdSql.error("select udf1(c1) , twa(c1) from stb1;") + tdSql.error("select udf1(c1) , irate(c1) from stb1;") + tdSql.error("select udf1(c1) , sum(c1) from stb1;") + tdSql.error("select udf1(c1) , stddev(c1) from stb1;") + tdSql.error("select udf1(c1) , mode(c1) from 
stb1;") + tdSql.error("select udf1(c1) , HYPERLOGLOG(c1) from stb1;") + + # regular table with select functions + + tdSql.query("select udf1(num1) , max(num1) from tb;") + tdSql.checkRows(1) + tdSql.query("select floor(num1) , max(num1) from tb;") + tdSql.checkRows(1) + tdSql.query("select udf1(num1) , min(num1) from tb;") + tdSql.checkRows(1) + tdSql.query("select ceil(num1) , min(num1) from tb;") + tdSql.checkRows(1) + tdSql.error("select udf1(num1) , first(num1) from tb;") + + tdSql.error("select abs(num1) , first(num1) from tb;") + + tdSql.error("select udf1(num1) , last(num1) from tb;") + + tdSql.error("select round(num1) , last(num1) from tb;") + + tdSql.query("select udf1(num1) , top(num1,1) from tb;") + tdSql.checkRows(1) + tdSql.query("select udf1(num1) , bottom(num1,1) from tb;") + tdSql.checkRows(1) + tdSql.error("select udf1(num1) , last_row(num1) from tb;") + + tdSql.error("select round(num1) , last_row(num1) from tb;") + + + # stable + tdSql.query("select udf1(c1) , max(c1) from stb1;") + tdSql.checkRows(1) + tdSql.query("select abs(c1) , max(c1) from stb1;") + tdSql.checkRows(1) + tdSql.query("select udf1(c1) , min(c1) from stb1;") + tdSql.checkRows(1) + tdSql.query("select floor(c1) , min(c1) from stb1;") + tdSql.checkRows(1) + tdSql.error("select udf1(c1) , first(c1) from stb1;") + + tdSql.error("select udf1(c1) , last(c1) from stb1;") + + tdSql.query("select udf1(c1) , top(c1 ,1) from stb1;") + tdSql.checkRows(1) + tdSql.query("select abs(c1) , top(c1 ,1) from stb1;") + tdSql.checkRows(1) + tdSql.query("select udf1(c1) , bottom(c1,1) from stb1;") + tdSql.checkRows(1) + tdSql.query("select ceil(c1) , bottom(c1,1) from stb1;") + tdSql.checkRows(1) + + tdSql.error("select udf1(c1) , last_row(c1) from stb1;") + tdSql.error("select ceil(c1) , last_row(c1) from stb1;") + + # regular table with compute functions + + tdSql.query("select udf1(num1) , abs(num1) from tb;") + tdSql.checkRows(12) + tdSql.query("select floor(num1) , abs(num1) from tb;") + tdSql.checkRows(12) + + # # bug need fix + + #tdSql.query("select udf1(num1) , csum(num1) from tb;") + #tdSql.checkRows(9) + #tdSql.query("select ceil(num1) , csum(num1) from tb;") + #tdSql.checkRows(9) + #tdSql.query("select udf1(c1) , csum(c1) from stb1;") + #tdSql.checkRows(22) + #tdSql.query("select floor(c1) , csum(c1) from stb1;") + #tdSql.checkRows(22) + + # stable with compute functions + tdSql.query("select udf1(c1) , abs(c1) from stb1;") + tdSql.checkRows(25) + tdSql.query("select abs(c1) , ceil(c1) from stb1;") + tdSql.checkRows(25) + + # nest query + tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from stb1 order by ts;") + tdSql.checkRows(25) + tdSql.checkData(0,0,None) + tdSql.checkData(0,1,None) + tdSql.checkData(1,0,88) + tdSql.checkData(1,1,8) + + tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;") + tdSql.checkRows(13) + tdSql.checkData(0,0,88) + tdSql.checkData(0,1,8) + tdSql.checkData(1,0,88) + tdSql.checkData(1,1,7) + + # bug fix for crash + # order by udf function result + for _ in range(50): + tdSql.query("select udf2(c1) from stb1 group by 1-udf1(c1)") + print(tdSql.queryResult) + + # udf functions with filter + + tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from stb1 where c1 is null order by ts;") + tdSql.checkRows(3) + tdSql.checkData(0,0,None) + tdSql.checkData(0,1,None) + + tdSql.query("select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts") + tdSql.checkRows(3) + tdSql.checkData(0,0,9) + tdSql.checkData(0,1,88) + tdSql.checkData(0,2,-99.990000000) + 
tdSql.checkData(0,3,88) + + tdSql.query("select sub1.c1, sub2.c2 from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") + tdSql.checkData(0,0,0) + tdSql.checkData(0,1,0) + tdSql.checkData(1,0,1) + tdSql.checkData(1,1,10) + + tdSql.query("select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") + tdSql.checkData(0,0,88) + tdSql.checkData(0,1,88) + tdSql.checkData(1,0,88) + tdSql.checkData(1,1,88) + + tdSql.query("select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") + tdSql.checkData(0,0,0) + tdSql.checkData(0,1,88) + tdSql.checkData(0,2,0) + tdSql.checkData(0,3,88) + tdSql.checkData(1,0,1) + tdSql.checkData(1,1,88) + tdSql.checkData(1,2,10) + tdSql.checkData(1,3,88) + + tdSql.query("select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") + tdSql.checkData(0,0,16.881943016) + tdSql.checkData(0,1,168.819430161) + tdSql.error("select sub1.c1 , udf2(sub1.c1), sub2.c2 ,udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") + + # udf functions with group by + tdSql.query("select udf1(c1) from ct1 group by c1") + tdSql.checkRows(10) + tdSql.query("select udf1(c1) from stb1 group by c1") + tdSql.checkRows(11) + tdSql.query("select c1,c2, udf1(c1,c2) from ct1 group by c1,c2") + tdSql.checkRows(10) + tdSql.query("select c1,c2, udf1(c1,c2) from stb1 group by c1,c2") + tdSql.checkRows(11) + + tdSql.query("select udf2(c1) from ct1 group by c1") + tdSql.checkRows(10) + tdSql.query("select udf2(c1) from stb1 group by c1") + tdSql.checkRows(11) + tdSql.query("select c1,c2, udf2(c1,c6) from ct1 group by c1,c2") + tdSql.checkRows(10) + tdSql.query("select c1,c2, udf2(c1,c6) from stb1 group by c1,c2") + tdSql.checkRows(11) + tdSql.query("select udf2(c1) from stb1 group by udf1(c1)") + tdSql.checkRows(2) + tdSql.query("select udf2(c1) from stb1 group by floor(c1)") + tdSql.checkRows(11) + + # udf mix with order by + tdSql.query("select udf2(c1) from stb1 group by floor(c1) order by udf2(c1)") + tdSql.checkRows(11) + + + def multi_cols_udf(self): + tdSql.query("select num1,num2,num3,udf1(num1,num2,num3) from tb") + tdSql.checkData(0,0,None) + tdSql.checkData(0,1,1) + tdSql.checkData(0,2,1.000000000) + tdSql.checkData(0,3,None) + tdSql.checkData(1,0,1) + tdSql.checkData(1,1,1) + tdSql.checkData(1,2,1.110000000) + tdSql.checkData(1,3,88) + + tdSql.query("select c1,c6,udf1(c1,c6) from stb1 order by ts") + tdSql.checkData(1,0,8) + tdSql.checkData(1,1,88.880000000) + tdSql.checkData(1,2,88) + + tdSql.query("select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;") + tdSql.checkRows(22) + + tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") + tdSql.checkData(0,0,169.661427555) + tdSql.checkData(0,1,169.661427555) + + def try_query_sql(self): + udf1_sqls = [ + "select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb" , + "select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1" , + "select udf1(num1) , max(num1) from tb;" , + "select udf1(num1) , min(num1) from tb;" , + #"select udf1(num1) , top(num1,1) from tb;" , + #"select udf1(num1) , bottom(num1,1) from tb;" , + "select udf1(c1) , max(c1) from stb1;" , + "select udf1(c1) , min(c1) from stb1;" , + #"select udf1(c1) , top(c1 ,1) from stb1;" , + #"select udf1(c1) , bottom(c1,1) from stb1;" , + "select udf1(num1) , 
abs(num1) from tb;" , + #"select udf1(num1) , csum(num1) from tb;" , + #"select udf1(c1) , csum(c1) from stb1;" , + "select udf1(c1) , abs(c1) from stb1;" , + "select abs(udf1(c1)) , abs(ceil(c1)) from stb1 order by ts;" , + "select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;" , + "select abs(udf1(c1)) , abs(ceil(c1)) from stb1 where c1 is null order by ts;" , + "select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts" , + "select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" , + "select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" , + "select udf1(c1) from ct1 group by c1" , + "select udf1(c1) from stb1 group by c1" , + "select c1,c2, udf1(c1,c2) from ct1 group by c1,c2" , + "select c1,c2, udf1(c1,c2) from stb1 group by c1,c2" , + "select num1,num2,num3,udf1(num1,num2,num3) from tb" , + "select c1,c6,udf1(c1,c6) from stb1 order by ts" , + "select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;" + ] + udf2_sqls = ["select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" , + "select udf2(c1) from stb1 group by 1-udf1(c1)" , + "select udf2(num1) ,udf2(num2), udf2(num3) from tb" , + "select udf2(num1)+100 ,udf2(num2)-100, udf2(num3)*100 ,udf2(num3)/100 from tb" , + "select udf2(c1) ,udf2(c6) from stb1 " , + "select udf2(c1)+100 ,udf2(c6)-100 ,udf2(c1)*100 ,udf2(c6)/100 from stb1 " , + "select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from ct1" , + "select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from stb1 " , + "select udf2(c1) from ct1 group by c1" , + "select udf2(c1) from stb1 group by c1" , + "select c1,c2, udf2(c1,c6) from ct1 group by c1,c2" , + "select c1,c2, udf2(c1,c6) from stb1 group by c1,c2" , + "select udf2(c1) from stb1 group by udf1(c1)" , + "select udf2(c1) from stb1 group by floor(c1)" , + "select udf2(c1) from stb1 group by floor(c1) order by udf2(c1)" , + + "select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" , + "select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" , + "select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" , + "select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null"] + + return udf1_sqls ,udf2_sqls + + + + def unexpected_create(self): + + tdLog.info(" create function without bufSize ") + tdSql.query("drop function udf1 ") + tdSql.query("drop function udf2 ") + + # create function without buffer + tdSql.execute("create function udf1 as '/tmp/udf/libudf1.so' outputtype int") + tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double") + udf1_sqls ,udf2_sqls = self.try_query_sql() + + for scalar_sql in udf1_sqls: + tdSql.query(scalar_sql) + for aggregate_sql in udf2_sqls: + tdSql.error(aggregate_sql) + + # create function without aggregate + + tdLog.info(" create function without aggregate ") + tdSql.query("drop function udf1 ") + tdSql.query("drop function udf2 ") + + # create with the scalar/aggregate flags swapped + tdSql.execute("create aggregate function udf1 as '/tmp/udf/libudf1.so' outputtype int bufSize 8 ") + tdSql.execute("create function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8") + udf1_sqls ,udf2_sqls = self.try_query_sql() + + 
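+ # with the scalar/aggregate flags swapped at create time, every query in both lists is expected to fail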
for scalar_sql in udf1_sqls: + tdSql.error(scalar_sql) + for aggregate_sql in udf2_sqls: + tdSql.error(aggregate_sql) + + tdSql.execute(" create function db as '/tmp/udf/libudf1.so' outputtype int bufSize 8 ") + tdSql.execute(" create aggregate function test as '/tmp/udf/libudf1.so' outputtype int bufSize 8 ") + tdSql.error(" select db(c1) from stb1 ") + tdSql.error(" select db(c1,c6), db(c6) from stb1 ") + tdSql.error(" select db(num1,num2), db(num1) from tb ") + tdSql.error(" select test(c1) from stb1 ") + tdSql.error(" select test(c1,c6), test(c6) from stb1 ") + tdSql.error(" select test(num1,num2), test(num1) from tb ") + + + + def loop_kill_udfd(self): + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + + cfgPath = buildPath + "/../sim/dnode1/cfg" + udfdPath = buildPath +'/build/bin/udfd' + + for i in range(3): + + tdLog.info(" loop restart udfd %d-th" % i) + + tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") + tdSql.checkData(0,0,169.661427555) + tdSql.checkData(0,1,169.661427555) + # stop udfd cmds + get_processID = "ps -ef | grep -w udfd | grep -v grep| grep -v defunct | awk '{print $2}'" + processID = subprocess.check_output(get_processID, shell=True).decode("utf-8") + stop_udfd = " kill -9 %s" % processID + os.system(stop_udfd) + + time.sleep(2) + + tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") + tdSql.checkData(0,0,169.661427555) + tdSql.checkData(0,1,169.661427555) + + # # start udfd cmds + # start_udfd = "nohup " + udfdPath + ' -c ' + cfgPath + " > /dev/null 2>&1 &" + # tdLog.info("start udfd : %s " % start_udfd) + + def test_function_name(self): + tdLog.info(" built-in function names and keywords can not be used as UDF names ") + tdSql.execute(" drop function udf1 ") + tdSql.execute(" drop function udf2 ") + tdSql.error("create function max as '/tmp/udf/libudf1.so' outputtype int bufSize 8") + tdSql.error("create aggregate function sum as '/tmp/udf/libudf2.so' outputtype double bufSize 8") + tdSql.error("create function max as '/tmp/udf/libudf1.so' outputtype int bufSize 8") + tdSql.error("create aggregate function sum as '/tmp/udf/libudf2.so' outputtype double bufSize 8") + tdSql.error("create aggregate function tbname as '/tmp/udf/libudf2.so' outputtype double bufSize 8") + tdSql.error("create aggregate function function as '/tmp/udf/libudf2.so' outputtype double bufSize 8") + tdSql.error("create aggregate function stable as '/tmp/udf/libudf2.so' outputtype double bufSize 8") + tdSql.error("create aggregate function union as '/tmp/udf/libudf2.so' outputtype double bufSize 8") + tdSql.error("create aggregate function 123 as '/tmp/udf/libudf2.so' outputtype double bufSize 8") + tdSql.error("create aggregate function 123db as '/tmp/udf/libudf2.so' outputtype double bufSize 8") + tdSql.error("create aggregate function mnode as '/tmp/udf/libudf2.so' outputtype double bufSize 8") + + def restart_taosd_query_udf(self): + + for i in range(3): + tdLog.info(" this is the %d-th taosd restart " %i) + tdSql.execute("use db ") + tdSql.query("select count(*) from stb1") + tdSql.checkRows(1) + tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null") + tdSql.checkData(0,0,169.661427555) + tdSql.checkData(0,1,169.661427555) + tdDnodes.stop(1) + tdDnodes.start(1) + time.sleep(2) + + + def 
run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring + + print(" env is ok for all ") + self.prepare_udf_so() + self.prepare_data() + self.create_udf_function() + self.basic_udf_query() + self.multi_cols_udf() + self.restart_taosd_query_udf() + + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/0-others/user_control.py b/tests/system-test/0-others/user_control.py index 48058af295b5da8664a8a477803c7c9d8d3c526f..3adc31cc39c182fcc2ba5a6083eb4d2f4c7aaeb9 100644 --- a/tests/system-test/0-others/user_control.py +++ b/tests/system-test/0-others/user_control.py @@ -1,7 +1,9 @@ +from tabnanny import check import taos -import sys +import time import inspect import traceback +from dataclasses import dataclass from util.log import * from util.sql import * @@ -12,6 +14,10 @@ PRIVILEGES_ALL = "ALL" PRIVILEGES_READ = "READ" PRIVILEGES_WRITE = "WRITE" +WEIGHT_ALL = 5 +WEIGHT_READ = 2 +WEIGHT_WRITE = 3 + PRIMARY_COL = "ts" INT_COL = "c1" @@ -94,6 +100,7 @@ class TDconnect: self.cursor.close() self._conn.close() + def taos_connect( host = "127.0.0.1", port = 6030, @@ -111,6 +118,15 @@ def taos_connect( config=config ) + +@dataclass +class User: + name : str = None + passwd : str = None + db_set : set = None + priv : str = None + priv_weight : int = 0 + class TDTestCase: def init(self, conn, logSql): @@ -121,6 +137,22 @@ class TDTestCase: def __user_list(self): return [f"user_test{i}" for i in range(self.users_count) ] + def __users(self): + self.users = [] + self.root_user = User() + self.root_user.name = "root" + self.root_user.passwd = "taosdata" + self.root_user.db_set = set("*") + self.root_user.priv = PRIVILEGES_ALL + self.root_user.priv_weight = WEIGHT_ALL + for i in range(self.users_count): + user = User() + user.name = f"user_test{i}" + user.passwd = f"taosdata{i}" + user.db_set = set() + self.users.append(user) + return self.users + @property def __passwd_list(self): return [f"taosdata{i}" for i in range(self.users_count) ] @@ -205,76 +237,170 @@ class TDTestCase: def __grant_user_privileges(self, privilege, dbname=None, user_name="root"): return f"GRANT {privilege} ON {self.__priv_level(dbname)} TO {user_name} " - def grant_check(self, user="root", passwd="taosdata", priv=PRIVILEGES_ALL): - with taos_connect(user=user, passwd=passwd) as user: - user.query("use db") - user.query("show tables") - if priv in [PRIVILEGES_ALL, PRIVILEGES_READ]: - user.query("select * from ct1") - else: - user.error("select * from ct1") - if priv in [PRIVILEGES_ALL, PRIVILEGES_WRITE]: - user.query("insert into t1 (ts) values (now())") - else: - user.error("insert into t1 (ts) values (now())") - - def test_grant_current(self): - tdLog.printNoPrefix("==========step 1.0: if do not grant, can not read/write") - self.grant_check(user=self.__user_list[0], passwd=self.__passwd_list[0], priv=None) - - tdLog.printNoPrefix("==========step 1.1: grant read, can read, can not write") - sql = self.__grant_user_privileges(privilege=PRIVILEGES_READ, user_name=self.__user_list[0]) + def __revoke_user_privileges(self, privilege, dbname=None, user_name="root"): + return f"REVOKE {privilege} ON {self.__priv_level(dbname)} FROM {user_name} " + + def __user_check(self, user:User=None, check_priv=PRIVILEGES_ALL): + if user is None: + user = self.root_user + with taos_connect(user=user.name, passwd=user.passwd) as use: + time.sleep(2) + use.query("use db") + 
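+ # the branches below map the expected privilege to concrete operations: ALL can select and insert, READ can only select, WRITE can only insert, and None is rejected for both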
use.query("show tables") + if check_priv == PRIVILEGES_ALL: + use.query("select * from ct1") + use.query("insert into t1 (ts) values (now())") + elif check_priv == PRIVILEGES_READ: + use.query("select * from ct1") + use.error("insert into t1 (ts) values (now())") + elif check_priv == PRIVILEGES_WRITE: + use.error("select * from ct1") + use.query("insert into t1 (ts) values (now())") + elif check_priv is None: + use.error("select * from ct1") + use.error("insert into t1 (ts) values (now())") + + def __change_user_priv(self, user: User, pre_priv, invoke=False): + if user.priv == pre_priv and invoke : + return + if user.name == "root": + return + + if pre_priv.upper() == PRIVILEGES_ALL: + pre_weight = -5 if invoke else 5 + elif pre_priv.upper() == PRIVILEGES_READ: + pre_weight = -2 if invoke else 2 + elif pre_priv.upper() == PRIVILEGES_WRITE: + pre_weight = -3 if invoke else 3 + else: + return + pre_weight += user.priv_weight + + if pre_weight >= 5: + user.priv = PRIVILEGES_ALL + user.priv_weight = 5 + elif pre_weight == 3: + user.priv = PRIVILEGES_WRITE + user.priv_weight = pre_weight + elif pre_weight == 2: + user.priv_weight = pre_weight + user.priv = PRIVILEGES_READ + elif pre_weight in [1, -1]: + return + elif pre_weight <= 0: + user.priv_weight = 0 + user.priv = "" + + return user + + def grant_user(self, user: User = None, priv=PRIVILEGES_ALL, dbname=None): + if not user: + user = self.root_user + sql = self.__grant_user_privileges(privilege=priv, dbname=dbname, user_name=user.name) tdLog.info(sql) + if (user not in self.users and user.name != "root") or priv not in (PRIVILEGES_ALL, PRIVILEGES_READ, PRIVILEGES_WRITE): + tdSql.error(sql) tdSql.query(sql) - self.grant_check(user=self.__user_list[0], passwd=self.__passwd_list[0], priv=PRIVILEGES_READ) + self.__change_user_priv(user=user, pre_priv=priv) + user.db_set.add(dbname) + time.sleep(1) - tdLog.printNoPrefix("==========step 1.2: grant write, can write, can not read") - sql = self.__grant_user_privileges(privilege=PRIVILEGES_WRITE, user_name=self.__user_list[1]) + def revoke_user(self, user: User = None, priv=PRIVILEGES_ALL, dbname=None): + sql = self.__revoke_user_privileges(privilege=priv, dbname=dbname, user_name=user.name) tdLog.info(sql) + if user is None or priv not in (PRIVILEGES_ALL, PRIVILEGES_READ, PRIVILEGES_WRITE): + tdSql.error(sql) tdSql.query(sql) - self.grant_check(user=self.__user_list[1], passwd=self.__passwd_list[1], priv=PRIVILEGES_WRITE) + self.__change_user_priv(user=user, pre_priv=priv, invoke=True) + if user.name != "root": + user.db_set.discard(dbname) if dbname else user.db_set.clear() + time.sleep(1) + + def test_priv_change_current(self): + tdLog.printNoPrefix("==========step 1.0: if do not grant, can not read/write") + self.__user_check(user=self.root_user) + self.__user_check(user=self.users[0], check_priv=None) + + tdLog.printNoPrefix("==========step 1.1: grant read, can read, can not write") + self.grant_user(user=self.users[0], priv=PRIVILEGES_READ) + self.__user_check(user=self.users[0], check_priv=PRIVILEGES_READ) + + tdLog.printNoPrefix("==========step 1.2: grant write, can write") + self.grant_user(user=self.users[1], priv=PRIVILEGES_WRITE) + self.__user_check(user=self.users[1], check_priv=PRIVILEGES_WRITE) tdLog.printNoPrefix("==========step 1.3: grant all, can write and read") - sql = self.__grant_user_privileges(privilege=PRIVILEGES_ALL, user_name=self.__user_list[2]) - tdLog.info(sql) - tdSql.query(sql) - self.grant_check(user=self.__user_list[2], passwd=self.__passwd_list[2], 
priv=PRIVILEGES_ALL) + self.grant_user(user=self.users[2]) + self.__user_check(user=self.users[2], check_priv=PRIVILEGES_ALL) - tdLog.printNoPrefix("==========step 1.4: change grant read to write, can write , can not read") - sql = self.__grant_user_privileges(privilege=PRIVILEGES_WRITE, user_name=self.__user_list[0]) - tdLog.info(sql) - tdSql.query(sql) - self.grant_check(user=self.__user_list[0], passwd=self.__passwd_list[0], priv=PRIVILEGES_WRITE) + tdLog.printNoPrefix("==========step 1.4: grant read to write = all ") + self.grant_user(user=self.users[0], priv=PRIVILEGES_WRITE) + self.__user_check(user=self.users[0], check_priv=PRIVILEGES_ALL) - tdLog.printNoPrefix("==========step 1.5: change grant write to read, can not write , can read") - sql = self.__grant_user_privileges(privilege=PRIVILEGES_READ, user_name=self.__user_list[0]) - tdLog.info(sql) - tdSql.query(sql) - self.grant_check(user=self.__user_list[0], passwd=self.__passwd_list[0], priv=PRIVILEGES_READ) + tdLog.printNoPrefix("==========step 1.5: revoke write from all = read ") + self.revoke_user(user=self.users[0], priv=PRIVILEGES_WRITE) + self.__user_check(user=self.users[0], check_priv=PRIVILEGES_READ) - tdLog.printNoPrefix("==========step 1.6: change grant read to all, can write , can read") - sql = self.__grant_user_privileges(privilege=PRIVILEGES_ALL, user_name=self.__user_list[0]) - tdLog.info(sql) - tdSql.query(sql) - self.grant_check(user=self.__user_list[0], passwd=self.__passwd_list[0], priv=PRIVILEGES_ALL) + tdLog.printNoPrefix("==========step 1.6: grant write to read = all") + self.grant_user(user=self.users[1], priv=PRIVILEGES_READ) + self.__user_check(user=self.users[1], check_priv=PRIVILEGES_ALL) - tdLog.printNoPrefix("==========step 1.7: change grant all to write, can write , can not read") - sql = self.__grant_user_privileges(privilege=PRIVILEGES_WRITE, user_name=self.__user_list[0]) - tdLog.info(sql) - tdSql.query(sql) - self.grant_check(user=self.__user_list[0], passwd=self.__passwd_list[0], priv=PRIVILEGES_WRITE) + tdLog.printNoPrefix("==========step 1.7: revoke read from all = write ") + self.revoke_user(user=self.users[1], priv=PRIVILEGES_READ) + self.__user_check(user=self.users[1], check_priv=PRIVILEGES_WRITE) - tdLog.printNoPrefix("==========step 1.8: change grant write to all, can write , can read") - sql = self.__grant_user_privileges(privilege=PRIVILEGES_ALL, user_name=self.__user_list[0]) - tdLog.info(sql) - tdSql.query(sql) - self.grant_check(user=self.__user_list[0], passwd=self.__passwd_list[0], priv=PRIVILEGES_ALL) + tdLog.printNoPrefix("==========step 1.8: grant read to all = all") + self.grant_user(user=self.users[0], priv=PRIVILEGES_ALL) + self.__user_check(user=self.users[0], check_priv=PRIVILEGES_ALL) - tdLog.printNoPrefix("==========step 1.9: change grant all to read, can not write , can read") - sql = self.__grant_user_privileges(privilege=PRIVILEGES_READ, user_name=self.__user_list[0]) - tdLog.info(sql) - tdSql.query(sql) - self.grant_check(user=self.__user_list[0], passwd=self.__passwd_list[0], priv=PRIVILEGES_READ) + tdLog.printNoPrefix("==========step 1.9: grant write to all = all") + self.grant_user(user=self.users[1], priv=PRIVILEGES_ALL) + self.__user_check(user=self.users[1], check_priv=PRIVILEGES_ALL) + + tdLog.printNoPrefix("==========step 1.10: grant all to read = all") + self.grant_user(user=self.users[0], priv=PRIVILEGES_READ) + self.__user_check(user=self.users[0], check_priv=PRIVILEGES_ALL) + + tdLog.printNoPrefix("==========step 1.11: grant all to write = all") + 
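+ # a reference sketch of the weight model in __change_user_priv (READ=2, WRITE=3, ALL=5):
+ # grants add weights, revokes subtract them, and the total is capped at WEIGHT_ALL(5),
+ # so READ(2)+WRITE(3) -> 5 -> ALL, and granting WRITE on top of ALL leaves the user at ALL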
self.grant_user(user=self.users[1], priv=PRIVILEGES_WRITE) + self.__user_check(user=self.users[1], check_priv=PRIVILEGES_ALL) + + ### init user + self.revoke_user(user=self.users[0], priv=PRIVILEGES_WRITE) + self.revoke_user(user=self.users[1], priv=PRIVILEGES_READ) + + tdLog.printNoPrefix("==========step 1.12: revoke read from write = no change") + self.revoke_user(user=self.users[1], priv=PRIVILEGES_READ) + self.__user_check(user=self.users[1], check_priv=PRIVILEGES_WRITE) + + tdLog.printNoPrefix("==========step 1.13: revoke write from read = no change") + self.revoke_user(user=self.users[0], priv=PRIVILEGES_WRITE) + self.__user_check(user=self.users[0], check_priv=PRIVILEGES_READ) + + tdLog.printNoPrefix("==========step 1.14: revoke read from read = nothing") + self.revoke_user(user=self.users[0], priv=PRIVILEGES_READ) + self.__user_check(user=self.users[0], check_priv=None) + + tdLog.printNoPrefix("==========step 1.15: revoke write from write = nothing") + self.revoke_user(user=self.users[1], priv=PRIVILEGES_WRITE) + self.__user_check(user=self.users[1], check_priv=None) + + ### init user + self.grant_user(user=self.users[0], priv=PRIVILEGES_READ) + self.revoke_user(user=self.users[1], priv=PRIVILEGES_WRITE) + + tdLog.printNoPrefix("==========step 1.16: revoke all from write = nothing") + self.revoke_user(user=self.users[1], priv=PRIVILEGES_ALL) + self.__user_check(user=self.users[1], check_priv=None) + + tdLog.printNoPrefix("==========step 1.17: revoke all from read = nothing") + self.revoke_user(user=self.users[0], priv=PRIVILEGES_ALL) + self.__user_check(user=self.users[0], check_priv=None) + + tdLog.printNoPrefix("==========step 1.18: revoke all from all = nothing") + self.revoke_user(user=self.users[2], priv=PRIVILEGES_ALL) + time.sleep(3) + self.__user_check(user=self.users[2], check_priv=None) def __grant_err(self): return [ @@ -288,13 +414,30 @@ class TDTestCase: f"GRANT {self.__privilege[0]} ON db.t1 TO {self.__user_list[0]}" , ] + def __revoke_err(self): + return [ + self.__revoke_user_privileges(privilege=self.__privilege[0], user_name="") , + self.__revoke_user_privileges(privilege=self.__privilege[0], user_name="*") , + self.__revoke_user_privileges(privilege=self.__privilege[1], dbname="not_exist_db", user_name=self.__user_list[0]), + self.__revoke_user_privileges(privilege="any_priv", user_name=self.__user_list[0]), + self.__revoke_user_privileges(privilege="", dbname="db", user_name=self.__user_list[0]) , + self.__revoke_user_privileges(privilege=" ".join(self.__privilege), user_name=self.__user_list[0]) , + f"REVOKE {self.__privilege[0]} ON * FROM {self.__user_list[0]}" , + f"REVOKE {self.__privilege[0]} ON db.t1 FROM {self.__user_list[0]}" , + ] + def test_grant_err(self): for sql in self.__grant_err(): tdSql.error(sql) - def test_grant(self): + def test_revoke_err(self): + for sql in self.__revoke_err(): + tdSql.error(sql) + + def test_change_priv(self): self.test_grant_err() - self.test_grant_current() + self.test_revoke_err() + self.test_priv_change_current() def test_user_create(self): self.create_user_current() @@ -455,7 +598,9 @@ class TDTestCase: tdSql.prepare() self.__create_tb() self.rows = 10 + self.users_count = 5 self.__insert_data(self.rows) + self.users = self.__users() tdDnodes.stop(1) tdDnodes.start(1) @@ -469,7 +614,6 @@ class TDTestCase: # root用户权限 # 创建用户测试 tdLog.printNoPrefix("==========step1: create user test") - self.users_count = 5 self.test_user_create() # 查看用户 @@ -482,7 +626,7 @@ class TDTestCase: self.login_err(self.__user_list[0], 
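For context, the statements these helpers render follow the GRANT/REVOKE shapes below (a sketch with hypothetical user/db names; the error lists above probe exactly the deviations from it: empty or wildcard user names, a non-existent database, unknown or multiple privileges at once, and table-level scopes such as db.t1):

    user, db = "user_a", "db"                       # hypothetical names
    grant_read   = f"GRANT READ ON {db}.* TO {user}"
    grant_all    = f"GRANT ALL ON {db}.* TO {user}"
    revoke_write = f"REVOKE WRITE ON {db}.* FROM {user}"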
f"new{self.__passwd_list[0]}") # 用户权限设置 - self.test_grant() + self.test_change_priv() # 修改密码 tdLog.printNoPrefix("==========step3: alter user pass test") diff --git a/tests/system-test/1-insert/insertWithMoreVgroup.py b/tests/system-test/1-insert/insertWithMoreVgroup.py index f0f35831dbd5a276c98e2eede114ea14b7bcc5b2..8d2870fc2cf068153a424d2b1613188c018c6463 100644 --- a/tests/system-test/1-insert/insertWithMoreVgroup.py +++ b/tests/system-test/1-insert/insertWithMoreVgroup.py @@ -294,7 +294,7 @@ class TDTestCase: return def test_case3(self): - self.taosBenchCreate("127.0.0.1","no","db1", "stb1", 1, 8, 1*10000) + self.taosBenchCreate("127.0.0.1","no","db1", "stb1", 1, 1, 1*10) # self.taosBenchCreate("test209","no","db2", "stb2", 1, 8, 1*10000) # self.taosBenchCreate("chenhaoran02","no","db1", "stb1", 1, 8, 1*10000) @@ -349,17 +349,17 @@ class TDTestCase: # run case def run(self): - # create database and tables。 - self.test_case1() - tdLog.debug(" LIMIT test_case1 ............ [OK]") + # # create database and tables。 + # self.test_case1() + # tdLog.debug(" LIMIT test_case1 ............ [OK]") # # taosBenchmark : create database and table # self.test_case2() # tdLog.debug(" LIMIT test_case2 ............ [OK]") - # # taosBenchmark:create database/table and insert data - # self.test_case3() - # tdLog.debug(" LIMIT test_case3 ............ [OK]") + # taosBenchmark:create database/table and insert data + self.test_case3() + tdLog.debug(" LIMIT test_case3 ............ [OK]") # # test qnode diff --git a/tests/system-test/1-insert/manyVgroups.json b/tests/system-test/1-insert/manyVgroups.json index 1c9aa1f28cb0d1eba5b2cf9488dc9d5be2d3f7c2..5dea41476c8cf7777b5a548f470577e03c576663 100644 --- a/tests/system-test/1-insert/manyVgroups.json +++ b/tests/system-test/1-insert/manyVgroups.json @@ -10,7 +10,7 @@ "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, - "interlace_rows": 100000, + "interlace_rows": 0, "num_of_records_per_req": 100, "databases": [ { @@ -29,8 +29,8 @@ "batch_create_tbl_num": 50000, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 10, - "interlace_rows": 100000, + "insert_rows": 1, + "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 10000000, "disorder_ratio": 0, diff --git a/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py new file mode 100644 index 0000000000000000000000000000000000000000..4c56511d2717167d243e162776d4ffe75fb056f5 --- /dev/null +++ b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py @@ -0,0 +1,1489 @@ +################################################################### +# Copyright (c) 2021 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import traceback +import random +from taos.error import SchemalessError +import time +import numpy as np +from util.log import * +from util.cases import * +from util.sql import * +from util.common import tdCom +from util.types import TDSmlProtocolType, TDSmlTimestampType +import threading + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self._conn = conn + self.smlChildTableName_value = "id" + + def createDb(self, name="test", db_update_tag=0, protocol=None): + if protocol == "telnet-tcp": + name = "opentsdb_telnet" + + if db_update_tag == 0: + tdSql.execute(f"drop database if exists {name}") + tdSql.execute(f"create database if not exists {name} precision 'ms'") + else: + tdSql.execute(f"drop database if exists {name}") + tdSql.execute(f"create database if not exists {name} precision 'ms' update 1") + tdSql.execute(f'use {name}') + + def timeTrans(self, time_value, ts_type): + if int(time_value) == 0: + ts = time.time() + else: + if ts_type == TDSmlTimestampType.MILLI_SECOND.value or ts_type == None: + ts = int(''.join(list(filter(str.isdigit, time_value))))/1000 + elif ts_type == TDSmlTimestampType.SECOND.value: + ts = int(''.join(list(filter(str.isdigit, time_value))))/1 + ulsec = repr(ts).split('.')[1][:6] + if len(ulsec) < 6 and int(ulsec) != 0: + ulsec = int(ulsec) * (10 ** (6 - len(ulsec))) + elif int(ulsec) == 0: + ulsec *= 6 + # * follow two rows added for tsCheckCase + td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts)) + return td_ts + #td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts)) + td_ts = time.strftime("%Y-%m-%d %H:%M:%S.{}".format(ulsec), time.localtime(ts)) + return td_ts + #return repr(datetime.datetime.strptime(td_ts, "%Y-%m-%d %H:%M:%S.%f")) + + def dateToTs(self, datetime_input): + return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f"))) + + def getTdTypeValue(self, value, vtype="col"): + if vtype == "col": + if value.lower().endswith("i8"): + td_type = "TINYINT" + td_tag_value = ''.join(list(value)[:-2]) + elif value.lower().endswith("i16"): + td_type = "SMALLINT" + td_tag_value = ''.join(list(value)[:-3]) + elif value.lower().endswith("i32"): + td_type = "INT" + td_tag_value = ''.join(list(value)[:-3]) + elif value.lower().endswith("i64"): + td_type = "BIGINT" + td_tag_value = ''.join(list(value)[:-3]) + elif value.lower().endswith("u64"): + td_type = "BIGINT UNSIGNED" + td_tag_value = ''.join(list(value)[:-3]) + elif value.lower().endswith("f32"): + td_type = "FLOAT" + td_tag_value = ''.join(list(value)[:-3]) + td_tag_value = '{}'.format(np.float32(td_tag_value)) + elif value.lower().endswith("f64"): + td_type = "DOUBLE" + td_tag_value = ''.join(list(value)[:-3]) + if "e" in value.lower(): + td_tag_value = str(float(td_tag_value)) + elif value.lower().startswith('l"'): + td_type = "NCHAR" + td_tag_value = ''.join(list(value)[2:-1]) + elif value.startswith('"') and value.endswith('"'): + td_type = "BINARY" + td_tag_value = ''.join(list(value)[1:-1]) + elif value.lower() == "t" or value.lower() == "true": + td_type = "BOOL" + td_tag_value = "True" + elif value.lower() == "f" or value.lower() == "false": + td_type = "BOOL" + td_tag_value = 
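timeTrans above normalizes a raw telnet timestamp into the string form TDengine echoes back; a minimal standalone sketch of the millisecond path (the helper name is hypothetical, and the expected string assumes the UTC+8 timezone that tsCheckCase later asserts):

    import time

    def ms_to_str(ms):
        # e.g. 1626006833640 -> "2021-07-11 20:33:53.640000" under UTC+8
        sec, frac_ms = divmod(ms, 1000)
        return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(sec)) + ".%06d" % (frac_ms * 1000)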
"False" + elif value.isdigit(): + td_type = "DOUBLE" + td_tag_value = str(float(value)) + else: + td_type = "DOUBLE" + if "e" in value.lower(): + td_tag_value = str(float(value)) + else: + td_tag_value = value + elif vtype == "tag": + td_type = "NCHAR" + td_tag_value = str(value) + return td_type, td_tag_value + + def typeTrans(self, type_list): + type_num_list = [] + for tp in type_list: + if tp.upper() == "TIMESTAMP": + type_num_list.append(9) + elif tp.upper() == "BOOL": + type_num_list.append(1) + elif tp.upper() == "TINYINT": + type_num_list.append(2) + elif tp.upper() == "SMALLINT": + type_num_list.append(3) + elif tp.upper() == "INT": + type_num_list.append(4) + elif tp.upper() == "BIGINT": + type_num_list.append(5) + elif tp.upper() == "FLOAT": + type_num_list.append(6) + elif tp.upper() == "DOUBLE": + type_num_list.append(7) + elif tp.upper() == "BINARY": + type_num_list.append(8) + elif tp.upper() == "NCHAR": + type_num_list.append(10) + elif tp.upper() == "BIGINT UNSIGNED": + type_num_list.append(14) + return type_num_list + + def inputHandle(self, input_sql, ts_type, protocol=None): + input_sql_split_list = input_sql.split(" ") + if protocol == "telnet-tcp": + input_sql_split_list.pop(0) + stb_name = input_sql_split_list[0] + stb_tag_list = input_sql_split_list[3:] + stb_tag_list[-1] = stb_tag_list[-1].strip() + stb_col_value = input_sql_split_list[2] + ts_value = self.timeTrans(input_sql_split_list[1], ts_type) + + tag_name_list = [] + tag_value_list = [] + td_tag_value_list = [] + td_tag_type_list = [] + + col_name_list = [] + col_value_list = [] + td_col_value_list = [] + td_col_type_list = [] + + for elm in stb_tag_list: + if self.smlChildTableName_value == "ID": + if "id=" in elm.lower(): + tb_name = elm.split('=')[1] + else: + tag_name_list.append(elm.split("=")[0].lower()) + tag_value_list.append(elm.split("=")[1]) + tb_name = "" + td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1]) + td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0]) + else: + if "id" == elm.split("=")[0].lower(): + tag_name_list.insert(0, elm.split("=")[0]) + tag_value_list.insert(0, elm.split("=")[1]) + td_tag_value_list.insert(0, self.getTdTypeValue(elm.split("=")[1], "tag")[1]) + td_tag_type_list.insert(0, self.getTdTypeValue(elm.split("=")[1], "tag")[0]) + else: + tag_name_list.append(elm.split("=")[0]) + tag_value_list.append(elm.split("=")[1]) + tb_name = "" + td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1]) + td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0]) + + col_name_list.append('_value') + col_value_list.append(stb_col_value) + + td_col_value_list.append(self.getTdTypeValue(stb_col_value)[1]) + td_col_type_list.append(self.getTdTypeValue(stb_col_value)[0]) + + final_field_list = [] + final_field_list.extend(col_name_list) + final_field_list.extend(tag_name_list) + + final_type_list = [] + final_type_list.append("TIMESTAMP") + final_type_list.extend(td_col_type_list) + final_type_list.extend(td_tag_type_list) + final_type_list = self.typeTrans(final_type_list) + + final_value_list = [] + final_value_list.append(ts_value) + final_value_list.extend(td_col_value_list) + final_value_list.extend(td_tag_value_list) + return final_value_list, final_field_list, final_type_list, stb_name, tb_name + + def genFullTypeSql(self, stb_name="", tb_name="", value="", t0="", t1="127i8", t2="32767i16", t3="2147483647i32", + t4="9223372036854775807i64", t5="11.12345f32", t6="22.123456789f64", t7="\"binaryTagValue\"", 
+ t8="L\"ncharTagValue\"", ts="1626006833641", + id_noexist_tag=None, id_change_tag=None, id_upper_tag=None, id_mixul_tag=None, id_double_tag=None, + t_add_tag=None, t_mul_tag=None, c_multi_tag=None, c_blank_tag=None, t_blank_tag=None, + chinese_tag=None, multi_field_tag=None, point_trans_tag=None, protocol=None, tcp_keyword_tag=None): + if stb_name == "": + stb_name = tdCom.getLongName(len=6, mode="letters") + if tb_name == "": + tb_name = f'{stb_name}_{random.randint(0, 65535)}_{random.randint(0, 65535)}' + if t0 == "": + t0 = "t" + if value == "": + value = random.choice(["f", "F", "false", "False", "t", "T", "true", "True", "TRUE", "FALSE"]) + if id_upper_tag is not None: + id = "ID" + else: + id = "id" + if id_mixul_tag is not None: + id = random.choice(["iD", "Id"]) + else: + id = "id" + sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}' + if id_noexist_tag is not None: + sql_seq = f'{stb_name} {ts} {value} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}' + if t_add_tag is not None: + sql_seq = f'{stb_name} {ts} {value} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8} t9={t8}' + if id_change_tag is not None: + sql_seq = f'{stb_name} {ts} {value} t0={t0} {id}={tb_name} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}' + if id_double_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}=\"{tb_name}_1\" t0={t0} t1={t1} {id}=\"{tb_name}_2\" t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}' + if t_add_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8} t11={t1} t10={t8}' + if t_mul_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}' + if id_noexist_tag is not None: + sql_seq = f'{stb_name} {ts} {value} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}' + if c_multi_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}' + if c_blank_tag is not None: + sql_seq = f'{stb_name} {ts} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}' + if t_blank_tag is not None: + sql_seq = f'{stb_name} {ts} {value}' + if chinese_tag is not None: + sql_seq = f'{stb_name} {ts} L"涛思数据" t0={t0} t1=L"涛思数据"' + if multi_field_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} {value}' + if point_trans_tag is not None: + sql_seq = f'.point.trans.test {ts} {value} t0={t0}' + if tcp_keyword_tag is not None: + sql_seq = f'put {ts} {value} t0={t0}' + if protocol == "telnet-tcp": + sql_seq = 'put ' + sql_seq + '\n' + return sql_seq, stb_name + + def genMulTagColStr(self, genType, count=1): + """ + genType must be tag/col + """ + tag_str = "" + col_str = "" + if genType == "tag": + for i in range(0, count): + if i < (count-1): + tag_str += f't{i}=f ' + else: + tag_str += f't{i}=f' + return tag_str + if genType == "col": + col_str = "t" + return col_str + + def genLongSql(self, tag_count): + stb_name = tdCom.getLongName(7, mode="letters") + tag_str = self.genMulTagColStr("tag", tag_count) + col_str = self.genMulTagColStr("col") + ts = "1626006833641" + long_sql = stb_name + ' ' + ts + ' ' + col_str + ' ' + ' ' + tag_str + return long_sql, stb_name + + def getNoIdTbName(self, stb_name, protocol=None): + query_sql = f"select tbname from {stb_name}" + tb_name = 
+    def getNoIdTbName(self, stb_name, protocol=None):
+        query_sql = f"select tbname from {stb_name}"
+        tb_name = self.resHandle(query_sql, True, protocol)[0][0]
+        return tb_name
+
+    def resHandle(self, query_sql, query_tag, protocol=None):
+        tdSql.execute('reset query cache')
+        if protocol == "telnet-tcp":
+            time.sleep(0.5)
+        row_info = tdSql.query(query_sql, query_tag)
+        col_info = tdSql.getColNameList(query_sql, query_tag)
+        res_row_list = []
+        sub_list = []
+        for row_mem in row_info:
+            for i in row_mem:
+                sub_list.append(str(i))
+            res_row_list.append(sub_list)
+        res_field_list_without_ts = col_info[0][1:]
+        res_type_list = col_info[1]
+        return res_row_list, res_field_list_without_ts, res_type_list
+
+    def resCmp(self, input_sql, stb_name, query_sql="select * from", condition="", ts=None, ts_type=None, id=True, none_check_tag=None, precision=None, protocol=None):
+        expect_list = self.inputHandle(input_sql, ts_type, protocol)
+        if protocol == "telnet-tcp":
+            tdCom.tcpClient(input_sql)
+        else:
+            if precision == None:
+                self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, ts_type)
+            else:
+                self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, precision)
+        query_sql = f"{query_sql} {stb_name} {condition}"
+        res_row_list, res_field_list_without_ts, res_type_list = self.resHandle(query_sql, True, protocol)
+        if ts == 0:
+            res_ts = self.dateToTs(res_row_list[0][0])
+            current_time = time.time()
+            if current_time - res_ts < 60:
+                tdSql.checkEqual(res_row_list[0][1:], expect_list[0][1:])
+            else:
+                print("timeout")
+                tdSql.checkEqual(res_row_list[0], expect_list[0])
+        else:
+            if none_check_tag is not None:
+                none_index_list = [i for i, x in enumerate(res_row_list[0]) if x == "None"]
+                none_index_list.reverse()
+                for j in none_index_list:
+                    res_row_list[0].pop(j)
+                    expect_list[0].pop(j)
+            tdSql.checkEqual(res_row_list[0], expect_list[0])
+        tdSql.checkEqual(res_field_list_without_ts, expect_list[1])
+        for i in range(len(res_type_list)):
+            tdSql.checkEqual(res_type_list[i], expect_list[2][i])
+
+    def initCheckCase(self, protocol=None):
+        """
+        normal tags and cols, one for every elm
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_sql, stb_name = self.genFullTypeSql(protocol=protocol)
+        self.resCmp(input_sql, stb_name, protocol=protocol)
+
+    def boolTypeCheckCase(self, protocol=None):
+        """
+        check all normal type
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"]
+        for t_type in full_type_list:
+            input_sql, stb_name = self.genFullTypeSql(t0=t_type, protocol=protocol)
+            self.resCmp(input_sql, stb_name, protocol=protocol)
+
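The round-trip pattern the cases below repeat: push one telnet line through the native connection, then query the auto-created stable back. A trimmed sketch using the enums this file already imports, written as if inside a test method of this class (the metric and tag names are hypothetical):

    line = 'weather 1626006833640 23.5f32 location="beijing"'
    self._conn.schemaless_insert([line], TDSmlProtocolType.TELNET.value,
                                 TDSmlTimestampType.MILLI_SECOND.value)
    tdSql.query("select * from weather")  # stable "weather": ts, _value FLOAT, tag location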
+ """ + ''' + please test : + binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"' + ''' + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"' + nchar_symbols = f'L{binary_symbols}' + input_sql1, stb_name1 = self.genFullTypeSql(value=binary_symbols, t7=binary_symbols, t8=nchar_symbols, protocol=protocol) + input_sql2, stb_name2 = self.genFullTypeSql(value=nchar_symbols, t7=binary_symbols, t8=nchar_symbols, protocol=protocol) + self.resCmp(input_sql1, stb_name1, protocol=protocol) + self.resCmp(input_sql2, stb_name2, protocol=protocol) + + def tsCheckCase(self): + """ + test ts list --> ["1626006833640ms", "1626006834s", "1626006822639022"] + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(ts=1626006833640) + self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.MILLI_SECOND.value) + input_sql, stb_name = self.genFullTypeSql(ts=1626006833640) + self.resCmp(input_sql, stb_name, ts_type=None) + input_sql, stb_name = self.genFullTypeSql(ts=1626006834) + self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.SECOND.value) + + tdSql.execute(f"drop database if exists test_ts") + tdSql.execute(f"create database if not exists test_ts precision 'ms'") + tdSql.execute("use test_ts") + input_sql = ['test_ms 1626006833640 t t0=t', 'test_ms 1626006833641 f t0=t'] + self._conn.schemaless_insert(input_sql, TDSmlProtocolType.TELNET.value, None) + res = tdSql.query('select * from test_ms', True) + tdSql.checkEqual(str(res[0][0]), "2021-07-11 20:33:53.640000") + tdSql.checkEqual(str(res[1][0]), "2021-07-11 20:33:53.641000") + + def openTstbTelnetTsCheckCase(self): + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 0 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64' + stb_name = input_sql.split(" ")[0] + self.resCmp(input_sql, stb_name, ts=0) + input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 1626006833640 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64' + stb_name = input_sql.split(" ")[0] + self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.MILLI_SECOND.value) + input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 1626006834 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64' + stb_name = input_sql.split(" ")[0] + self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.SECOND.value) + for ts in [1, 12, 123, 1234, 12345, 123456, 1234567, 12345678, 162600683, 16260068341, 162600683412, 16260068336401]: + try: + input_sql = f'{tdCom.getLongName(len=10, mode="letters")} {ts} 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64' + self._conn.schemaless_insert(input_sql, TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def idSeqCheckCase(self, protocol=None): + """ + check id.index in tags + eg: t0=**,id=**,t1=** + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, protocol=protocol) + self.resCmp(input_sql, stb_name, protocol=protocol) + + def 
+    def idLetterCheckCase(self, protocol=None):
+        """
+        check id param
+        eg: id and ID
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_sql, stb_name = self.genFullTypeSql(id_upper_tag=True, protocol=protocol)
+        self.resCmp(input_sql, stb_name, protocol=protocol)
+        input_sql, stb_name = self.genFullTypeSql(id_mixul_tag=True, protocol=protocol)
+        self.resCmp(input_sql, stb_name, protocol=protocol)
+        input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, id_upper_tag=True, protocol=protocol)
+        self.resCmp(input_sql, stb_name, protocol=protocol)
+
+    def noIdCheckCase(self, protocol=None):
+        """
+        id not exist
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_sql, stb_name = self.genFullTypeSql(id_noexist_tag=True, protocol=protocol)
+        self.resCmp(input_sql, stb_name, protocol=protocol)
+        query_sql = f"select tbname from {stb_name}"
+        res_row_list = self.resHandle(query_sql, True)[0]
+        if len(res_row_list[0][0]) > 0:
+            tdSql.checkColNameList(res_row_list, res_row_list)
+        else:
+            tdSql.checkColNameList(res_row_list, "please check noIdCheckCase")
+
+    def maxColTagCheckCase(self):
+        """
+        max tag count is 128
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        for input_sql in [self.genLongSql(128)[0]]:
+            tdCom.cleanTb()
+            self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+        for input_sql in [self.genLongSql(129)[0]]:
+            tdCom.cleanTb()
+            try:
+                self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+                raise Exception("should not reach here")
+            except SchemalessError as err:
+                tdSql.checkNotEqual(err.errno, 0)
+
+    def stbTbNameCheckCase(self, protocol=None):
+        """
+        test illegal id name
+        mix "`~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?"
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        rstr = list("~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?")
+        for i in rstr:
+            input_sql, stb_name = self.genFullTypeSql(tb_name=f"\"aaa{i}bbb\"", protocol=protocol)
+            self.resCmp(input_sql, f'`{stb_name}`', protocol=protocol)
+            tdSql.execute(f'drop table if exists `{stb_name}`')
+
+    def idStartWithNumCheckCase(self, protocol=None):
+        """
+        id starts with a number
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_sql, stb_name = self.genFullTypeSql(tb_name="1aaabbb", protocol=protocol)
+        self.resCmp(input_sql, stb_name, protocol=protocol)
+
+    def nowTsCheckCase(self):
+        """
+        check now unsupported
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_sql = self.genFullTypeSql(ts="now")[0]
+        try:
+            self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+            raise Exception("should not reach here")
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
+    def dateFormatTsCheckCase(self):
+        """
+        check date format ts unsupported
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_sql = self.genFullTypeSql(ts="2021-07-21\ 19:01:46.920")[0]
+        try:
+            self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+            raise Exception("should not reach here")
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
+    def illegalTsCheckCase(self):
+        """
+        check ts format like 16260068336390us19
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_sql = self.genFullTypeSql(ts="16260068336390us19")[0]
+        try:
+            self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+            raise Exception("should not reach here")
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
+    def tbnameCheckCase(self):
+        """
+        check length 192
+        check upper tbname
+        check upper tag
+        length of stb_name tb_name <= 192
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        stb_name_192 = tdCom.getLongName(len=192, mode="letters")
+        tb_name_192 = tdCom.getLongName(len=192, mode="letters")
+        tdCom.cleanTb()
+        input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name_192, tb_name=tb_name_192)
+        self.resCmp(input_sql, stb_name)
+        tdSql.query(f'select * from {stb_name}')
+        tdSql.checkRows(1)
+        if self.smlChildTableName_value == "ID":
+            for input_sql in [self.genFullTypeSql(stb_name=tdCom.getLongName(len=193, mode="letters"), tb_name=tdCom.getLongName(len=5, mode="letters"))[0], self.genFullTypeSql(tb_name=tdCom.getLongName(len=193, mode="letters"))[0]]:
+                try:
+                    self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+                    raise Exception("should not reach here")
+                except SchemalessError as err:
+                    tdSql.checkNotEqual(err.errno, 0)
+            input_sql = 'Abcdffgg 1626006833640 False T1=127i8 id=Abcddd'
+        else:
+            input_sql = self.genFullTypeSql(stb_name=tdCom.getLongName(len=193, mode="letters"), tb_name=tdCom.getLongName(len=5, mode="letters"))[0]
+            try:
+                self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+                raise Exception("should not reach here")
+            except SchemalessError as err:
+                tdSql.checkNotEqual(err.errno, 0)
+            input_sql = 'Abcdffgg 1626006833640 False T1=127i8'
+        stb_name = f'`{input_sql.split(" ")[0]}`'
+        self.resCmp(input_sql, stb_name)
+        tdSql.execute('drop table `Abcdffgg`')
+
+    def tagNameLengthCheckCase(self):
+        """
+        check tag name limit <= 62
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        tag_name = tdCom.getLongName(61, "letters")
+        tag_name = f'T{tag_name}'
+        stb_name = tdCom.getLongName(7, "letters")
+        input_sql = f'{stb_name} 1626006833640 L"bcdaaa" {tag_name}=f'
+        self.resCmp(input_sql, stb_name)
+        input_sql = f'{stb_name} 1626006833640 L"gggcdaaa" {tdCom.getLongName(65, "letters")}=f'
+        try:
+            self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+            raise Exception("should not reach here")
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
+    def tagValueLengthCheckCase(self):
+        """
+        check full type tag value limit
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        # nchar
+        # * legal nchar could not be larger than 16374/4
+        stb_name = tdCom.getLongName(7, "letters")
+        input_sql = f'{stb_name} 1626006833640 t t0=t t1={tdCom.getLongName(4093, "letters")}'
+        self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+
+        input_sql = f'{stb_name} 1626006833640 t t0=t t1={tdCom.getLongName(4094, "letters")}'
+        try:
+            self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+            raise Exception("should not reach here")
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
+    def colValueLengthCheckCase(self):
+        """
+        check full type col value limit
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        # i8
+        for value in ["-128i8", "127i8"]:
+            input_sql, stb_name = self.genFullTypeSql(value=value)
+            self.resCmp(input_sql, stb_name)
+        tdCom.cleanTb()
+        for value in ["-129i8", "128i8"]:
+            input_sql = self.genFullTypeSql(value=value)[0]
+            try:
+                self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+                raise Exception("should not reach here")
+            except SchemalessError as err:
+                tdSql.checkNotEqual(err.errno, 0)
+        # i16
+        tdCom.cleanTb()
+        for value in ["-32768i16"]:
+            input_sql, stb_name = self.genFullTypeSql(value=value)
+            self.resCmp(input_sql, stb_name)
+        tdCom.cleanTb()
+        for value in ["-32769i16", "32768i16"]:
+            input_sql = self.genFullTypeSql(value=value)[0]
+            try:
+                self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+                raise Exception("should not reach here")
+            except SchemalessError as err:
+                tdSql.checkNotEqual(err.errno, 0)
+
+        # i32
+        tdCom.cleanTb()
+        for value in ["-2147483648i32"]:
+            input_sql, stb_name = self.genFullTypeSql(value=value)
+            self.resCmp(input_sql, stb_name)
+        tdCom.cleanTb()
+        for value in ["-2147483649i32", "2147483648i32"]:
+            input_sql = self.genFullTypeSql(value=value)[0]
+            try:
+                self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+                raise Exception("should not reach here")
+            except SchemalessError as err:
+                tdSql.checkNotEqual(err.errno, 0)
+
+        # i64
+        tdCom.cleanTb()
+        for value in ["-9223372036854775808i64"]:
+            input_sql, stb_name = self.genFullTypeSql(value=value)
+            self.resCmp(input_sql, stb_name)
+        tdCom.cleanTb()
+        for value in ["-9223372036854775809i64", "9223372036854775808i64"]:
+            input_sql = self.genFullTypeSql(value=value)[0]
+            try:
+                self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+                raise Exception("should not reach here")
+            except SchemalessError as err:
+                tdSql.checkNotEqual(err.errno, 0)
+
+        # f32
+        tdCom.cleanTb()
+        for value in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]:
+            input_sql, stb_name = self.genFullTypeSql(value=value)
+            self.resCmp(input_sql, stb_name)
+        # * limit set to 4028234664*(10**38)
+        tdCom.cleanTb()
+        for value in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]:
+            input_sql = self.genFullTypeSql(value=value)[0]
+            try:
+                self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+                raise Exception("should not reach here")
+            except SchemalessError as err:
+                tdSql.checkNotEqual(err.errno, 0)
+
+        # f64
+        tdCom.cleanTb()
+        for value in [f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64', f'{1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64']:
+            input_sql, stb_name = self.genFullTypeSql(value=value)
+            self.resCmp(input_sql, stb_name)
+        # # * limit set to 1.797693134862316*(10**308)
+        # tdCom.cleanTb()
+        # for value in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']:
+        #     input_sql = self.genFullTypeSql(value=value)[0]
+        #     try:
+        #         self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+        #         raise Exception("should not reach here")
+        #     except SchemalessError as err:
+        #         tdSql.checkNotEqual(err.errno, 0)
+
+        # # binary
+        # tdCom.cleanTb()
+        # stb_name = tdCom.getLongName(7, "letters")
+        # input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16374, "letters")}" t0=t'
+        # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+
+        # tdCom.cleanTb()
+        # input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16375, "letters")}" t0=t'
+        # try:
+        #     self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+        #     raise Exception("should not reach here")
+        # except SchemalessError as err:
+        #     tdSql.checkNotEqual(err.errno, 0)
+
+        # # nchar
+        # # * legal nchar could not be larger than 16374/4
+        # tdCom.cleanTb()
+        # stb_name = tdCom.getLongName(7, "letters")
+        # input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4093, "letters")}" t0=t'
+        # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+
+        # tdCom.cleanTb()
+        # input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4094, "letters")}" t0=t'
+        # try:
+        #     self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+        #     raise Exception("should not reach here")
+        # except SchemalessError as err:
+        #     tdSql.checkNotEqual(err.errno, 0)
+
+    def tagColIllegalValueCheckCase(self):
+        """
+        test illegal tag col value
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        # bool
+        for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]:
+            input_sql1, stb_name = self.genFullTypeSql(t0=i)
+            self.resCmp(input_sql1, stb_name)
+            input_sql2, stb_name = self.genFullTypeSql(value=i)
+            self.resCmp(input_sql2, stb_name)
+
+        # i8 i16 i32 i64 f32 f64
+        for input_sql in [
+                self.genFullTypeSql(value="1s2i8")[0],
+                self.genFullTypeSql(value="1s2i16")[0],
+                self.genFullTypeSql(value="1s2i32")[0],
+                self.genFullTypeSql(value="1s2i64")[0],
+                self.genFullTypeSql(value="11.1s45f32")[0],
+                self.genFullTypeSql(value="11.1s45f64")[0],
+        ]:
+            try:
+                self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+                raise Exception("should not reach here")
+            except SchemalessError as err:
+                tdSql.checkNotEqual(err.errno, 0)
+
+        # check accepted binary and nchar symbols
+        # # * ~!@#$¥%^&*()-+={}|[]、「」:;
+        for symbol in list('~!@#$¥%^&*()-+={}|[]、「」:;'):
+            input_sql1 = f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc{symbol}aaa" t0=t'
+            input_sql2 = f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=t t1="abc{symbol}aaa"'
+            self._conn.schemaless_insert([input_sql1], TDSmlProtocolType.TELNET.value, None)
+            # self._conn.schemaless_insert([input_sql2], TDSmlProtocolType.TELNET.value, None)
+
+    def blankCheckCase(self):
+        '''
+        check blank case
+        '''
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        # input_sql_list = [f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc aaa" t0=t',
+        #                   f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0="abaaa"',
+        #                   f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=L"abaaa"',
+        #                   f'{tdCom.getLongName(7, "letters")} 1626006833640 L"aba aa" t0=L"abcaaa3" ']
+        input_sql_list = [f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0="abaaa"',
+                          f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=L"abaaa"']
+        for input_sql in input_sql_list:
+            stb_name = input_sql.split(" ")[0]
+            self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+            tdSql.query(f'select * from {stb_name}')
+            tdSql.checkRows(1)
+
+    def duplicateIdTagColInsertCheckCase(self):
+        """
+        check duplicate Id Tag Col
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_sql_id = self.genFullTypeSql(id_double_tag=True)[0]
+        try:
+            self._conn.schemaless_insert([input_sql_id], TDSmlProtocolType.TELNET.value, None)
+            raise Exception("should not reach here")
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
+        input_sql = self.genFullTypeSql()[0]
+        input_sql_tag = input_sql.replace("t5", "t6")
+        try:
+            self._conn.schemaless_insert([input_sql_tag], TDSmlProtocolType.TELNET.value, None)
+            raise Exception("should not reach here")
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
+    ##### stb exist #####
+    @tdCom.smlPass
+    def noIdStbExistCheckCase(self):
+        """
+        case no id when stb exists
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", value="f")
+        self.resCmp(input_sql, stb_name)
+        input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", value="f")
+        self.resCmp(input_sql, stb_name, condition='where tbname like "t_%"')
+        tdSql.query(f"select * from {stb_name}")
+        tdSql.checkRows(2)
+
+    def duplicateInsertExistCheckCase(self):
+        """
+        check duplicate insert when stb exists
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_sql, stb_name = self.genFullTypeSql()
+        self.resCmp(input_sql, stb_name)
+        self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+        self.resCmp(input_sql, stb_name)
+
+    @tdCom.smlPass
+    def tagColBinaryNcharLengthCheckCase(self):
+        """
+        check length increase
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_sql, stb_name = self.genFullTypeSql()
+        self.resCmp(input_sql, stb_name)
+        tb_name = tdCom.getLongName(5, "letters")
+        input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7="\"binaryTagValuebinaryTagValue\"", t8="L\"ncharTagValuencharTagValue\"")
+        self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"')
+
+    @tdCom.smlPass
+    def tagColAddDupIDCheckCase(self):
+        """
+        check tag count add, stb and tb duplicate
+        * tag: alter table ...
+        * col: when update==0 and ts is same, unchanged
+        * so in this case tag & value will be added,
+        * col is added without value when update==0
+        * col is added with value when update==1
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        tb_name = tdCom.getLongName(7, "letters")
+        for db_update_tag in [0, 1]:
+            if db_update_tag == 1:
+                self.createDb("test_update", db_update_tag=db_update_tag)
+            input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="t", value="t")
+            self.resCmp(input_sql, stb_name)
+            input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t0="t", value="f", t_add_tag=True)
+            if db_update_tag == 1:
+                self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True)
+                tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"')
+                tdSql.checkData(0, 11, None)
+                tdSql.checkData(0, 12, None)
+            else:
+                self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+                tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"')
+                tdSql.checkData(0, 1, True)
+                tdSql.checkData(0, 11, None)
+                tdSql.checkData(0, 12, None)
+        self.createDb()
+
+    @tdCom.smlPass
+    def tagColAddCheckCase(self):
+        """
+        check tag count add
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        tb_name = tdCom.getLongName(7, "letters")
+        input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", value="f")
+        self.resCmp(input_sql, stb_name)
+        tb_name_1 = tdCom.getLongName(7, "letters")
+        input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name_1, t0="f", value="f", t_add_tag=True)
+        self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name_1}"')
+        res_row_list = self.resHandle(f"select t10,t11 from {tb_name}", True)[0]
+        tdSql.checkEqual(res_row_list[0], ['None', 'None'])
+        self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True)
+
+    def tagMd5Check(self):
+        """
+        condition: stb not changed
+        insert two tables, keep tag unchanged, change col
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_sql, stb_name = self.genFullTypeSql(t0="f", value="f", id_noexist_tag=True)
+        self.resCmp(input_sql, stb_name)
+        tb_name1 = self.getNoIdTbName(stb_name)
+        input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, t0="f", value="f", id_noexist_tag=True)
+        self.resCmp(input_sql, stb_name)
+        tb_name2 = self.getNoIdTbName(stb_name)
+        tdSql.query(f"select * from {stb_name}")
+        tdSql.checkRows(1)
+        tdSql.checkEqual(tb_name1, tb_name2)
+        input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, t0="f", value="f", id_noexist_tag=True, t_add_tag=True)
+        self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+        tb_name3 = self.getNoIdTbName(stb_name)
+        tdSql.query(f"select * from {stb_name}")
+        tdSql.checkRows(2)
+        tdSql.checkNotEqual(tb_name1, tb_name3)
+
+    # * tag nchar max is 16374/4, col+ts nchar max 49151
+    def tagColNcharMaxLengthCheckCase(self):
+        """
+        check nchar length limit
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        stb_name = tdCom.getLongName(7, "letters")
+        input_sql = f'{stb_name} 1626006833640 f t2={tdCom.getLongName(1, "letters")}'
+        self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+
+        # * legal nchar could not be larger than 16374/4
+        input_sql = f'{stb_name} 1626006833640 f t1={tdCom.getLongName(4093, "letters")} t2={tdCom.getLongName(1, "letters")}'
+        self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+        tdSql.query(f"select * from {stb_name}")
+        tdSql.checkRows(2)
+        input_sql = f'{stb_name} 1626006833640 f t1={tdCom.getLongName(4093, "letters")} t2={tdCom.getLongName(2, "letters")}'
+        try:
+            self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+            raise Exception("should not reach here")
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+        tdSql.query(f"select * from {stb_name}")
+        tdSql.checkRows(2)
+
+    def batchInsertCheckCase(self):
+        """
+        test batch insert
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        stb_name = tdCom.getLongName(8, "letters")
+        tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
+
+        lines = ["st123456 1626006833640 1i64 t1=3i64 t2=4f64 t3=\"t3\"",
+                 "st123456 1626006833641 2i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64",
+                 f'{stb_name} 1626006833642 3i64 t2=5f64 t3=L\"ste\"',
+                 "stf567890 1626006833643 4i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64",
+                 "st123456 1626006833644 5i64 t1=4i64 t2=5f64 t3=\"t4\"",
+                 f'{stb_name} 1626006833645 6i64 t2=5f64 t3=L\"ste2\"',
+                 f'{stb_name} 1626006833646 7i64 t2=5f64 t3=L\"ste2\"',
+                 "st123456 1626006833647 8i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64",
+                 "st123456 1626006833648 9i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64"
+                 ]
+        self._conn.schemaless_insert(lines, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.MILLI_SECOND.value)
+        tdSql.query('show stables')
+        tdSql.checkRows(3)
+        tdSql.query('show tables')
+        tdSql.checkRows(6)
+        tdSql.query('select * from st123456')
+        tdSql.checkRows(5)
+
+    def multiInsertCheckCase(self, count):
+        """
+        test multi insert
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        sql_list = []
+        stb_name = tdCom.getLongName(8, "letters")
+        tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 nchar(10))')
+        for i in range(count):
+            input_sql = self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)[0]
+            sql_list.append(input_sql)
+        self._conn.schemaless_insert(sql_list, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.MILLI_SECOND.value)
+        tdSql.query('show tables')
+        tdSql.checkRows(count)
+
+    def batchErrorInsertCheckCase(self):
+        """
+        test batch error insert
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        stb_name = tdCom.getLongName(8, "letters")
+        lines = ["st123456 1626006833640 3i 64 t1=3i64 t2=4f64 t3=\"t3\"",
+                 f"{stb_name} 1626056811823316532ns tRue t2=5f64 t3=L\"ste\""]
+        try:
+            self._conn.schemaless_insert(lines, TDSmlProtocolType.TELNET.value, None)
+            raise Exception("should not reach here")
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
+    def multiColsInsertCheckCase(self):
+        """
+        test multi cols insert
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_sql = self.genFullTypeSql(c_multi_tag=True)[0]
+        try:
+            self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+            raise Exception("should not reach here")
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
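batchInsertCheckCase above hands several independent telnet lines to a single schemaless_insert call; lines for different stables may be mixed freely, and each distinct tag set becomes its own child table. A minimal sketch with hypothetical metrics, written as if inside a test method of this class:

    lines = ['cpu_load 1626006833640 0.9f32 host="h1"',
             'cpu_load 1626006833641 0.7f32 host="h2"',
             'mem_used 1626006833640 512i32 host="h1"']
    # one call creates/extends both stables and inserts all three rows
    self._conn.schemaless_insert(lines, TDSmlProtocolType.TELNET.value,
                                 TDSmlTimestampType.MILLI_SECOND.value)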
blankColInsertCheckCase(self): + """ + test blank col insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(c_blank_tag=True)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def blankTagInsertCheckCase(self): + """ + test blank tag insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(t_blank_tag=True)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def chineseCheckCase(self): + """ + check nchar ---> chinese + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(chinese_tag=True) + self.resCmp(input_sql, stb_name) + + def multiFieldCheckCase(self): + ''' + multi_field + ''' + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(multi_field_tag=True)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def spellCheckCase(self): + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + stb_name = tdCom.getLongName(8, "letters") + input_sql_list = [f'{stb_name}_1 1626006833640 127I8 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_2 1626006833640 32767I16 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_3 1626006833640 2147483647I32 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_4 1626006833640 9223372036854775807I64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_5 1626006833640 11.12345027923584F32 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_6 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_7 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_8 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_9 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_10 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64'] + for input_sql in input_sql_list: + stb_name = input_sql.split(' ')[0] + self.resCmp(input_sql, stb_name) + + def pointTransCheckCase(self, protocol=None): + """ + metric value "." 
trans to "_" + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(point_trans_tag=True, protocol=protocol)[0] + if protocol == 'telnet-tcp': + stb_name = f'`{input_sql.split(" ")[1]}`' + else: + stb_name = f'`{input_sql.split(" ")[0]}`' + self.resCmp(input_sql, stb_name, protocol=protocol) + tdSql.execute("drop table `.point.trans.test`") + + def defaultTypeCheckCase(self): + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + stb_name = tdCom.getLongName(8, "letters") + input_sql_list = [f'{stb_name}_1 1626006833640 9223372036854775807 t0=f t1=127 t2=32767i16 t3=2147483647i32 t4=9223372036854775807 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', \ + f'{stb_name}_2 1626006833641 22.123456789 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789 t7="vozamcts" t8=L"ncharTagValue"', \ + f'{stb_name}_3 1626006833642 10e5F32 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=10e5F64 t7="vozamcts" t8=L"ncharTagValue"', \ + f'{stb_name}_4 1626006833643 10.0e5F64 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=10.0e5F32 t7="vozamcts" t8=L"ncharTagValue"', \ + f'{stb_name}_5 1626006833644 -10.0e5 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=-10.0e5 t7="vozamcts" t8=L"ncharTagValue"'] + for input_sql in input_sql_list: + stb_name = input_sql.split(" ")[0] + self.resCmp(input_sql, stb_name) + + def tbnameTagsColsNameCheckCase(self): + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + if self.smlChildTableName_value == "ID": + input_sql = 'rFa$sta 1626006834 9223372036854775807 id=rFas$ta_1 Tt!0=true tT@1=127Ii8 t#2=32767i16 "t$3"=2147483647i32 t%4=9223372036854775807i64 t^5=11.12345f32 t&6=22.123456789f64 t*7=\"ddzhiksj\" t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\"' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + query_sql = 'select * from `rFa$sta`' + query_res = tdSql.query(query_sql, True) + tdSql.checkEqual(query_res, [(datetime.datetime(2021, 7, 11, 20, 33, 54), 9.223372036854776e+18, 'true', '127Ii8', '32767i16', '2147483647i32', '9223372036854775807i64', '11.12345f32', '22.123456789f64', '"ddzhiksj"', 'L"ncharTagValue"')]) + col_tag_res = tdSql.getColNameList(query_sql) + tdSql.checkEqual(col_tag_res, ['ts', '_value', 'tt!0', 'tt@1', 't#2', '"t$3"', 't%4', 't^5', 't&6', 't*7', 't!@#$%^&*()_+[];:<>?,9']) + tdSql.execute('drop table `rFa$sta`') + else: + input_sql = 'rFa$sta 1626006834 9223372036854775807 Tt!0=true tT@1=127Ii8 t#2=32767i16 "t$3"=2147483647i32 t%4=9223372036854775807i64 t^5=11.12345f32 t&6=22.123456789f64 t*7=\"ddzhiksj\" t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\"' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + query_sql = 'select * from `rFa$sta`' + query_res = tdSql.query(query_sql, True) + tdSql.checkEqual(query_res, [(datetime.datetime(2021, 7, 11, 20, 33, 54), 9.223372036854776e+18, '2147483647i32', 'L"ncharTagValue"', '32767i16', '9223372036854775807i64', '22.123456789f64', '"ddzhiksj"', '11.12345f32', 'true', '127Ii8')]) + col_tag_res = tdSql.getColNameList(query_sql) + tdSql.checkEqual(col_tag_res, ['_ts', '_value', '"t$3"', 't!@#$%^&*()_+[];:<>?,9', 't#2', 't%4', 't&6', 't*7', 't^5', 'Tt!0', 'tT@1']) + tdSql.execute('drop table `rFa$sta`') + + def 
tcpKeywordsCheckCase(self, protocol="telnet-tcp"): + """ + stb = "put" + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(tcp_keyword_tag=True, protocol=protocol)[0] + stb_name = f'`{input_sql.split(" ")[1]}`' + self.resCmp(input_sql, stb_name, protocol=protocol) + + def genSqlList(self, count=5, stb_name="", tb_name=""): + """ + stb --> supertable + tb --> table + ts --> timestamp, same default + col --> column, same default + tag --> tag, same default + d --> different + s --> same + a --> add + m --> minus + """ + d_stb_d_tb_list = list() + s_stb_s_tb_list = list() + s_stb_s_tb_a_tag_list = list() + s_stb_s_tb_m_tag_list = list() + s_stb_d_tb_list = list() + s_stb_d_tb_m_tag_list = list() + s_stb_d_tb_a_tag_list = list() + s_stb_s_tb_d_ts_list = list() + s_stb_s_tb_d_ts_m_tag_list = list() + s_stb_s_tb_d_ts_a_tag_list = list() + s_stb_d_tb_d_ts_list = list() + s_stb_d_tb_d_ts_m_tag_list = list() + s_stb_d_tb_d_ts_a_tag_list = list() + for i in range(count): + d_stb_d_tb_list.append(self.genFullTypeSql(t0="f", value="f")) + s_stb_s_tb_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"')) + s_stb_s_tb_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', t_add_tag=True)) + s_stb_s_tb_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', t_mul_tag=True)) + s_stb_d_tb_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)) + s_stb_d_tb_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, t_mul_tag=True)) + s_stb_d_tb_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, t_add_tag=True)) + s_stb_s_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0)) + s_stb_s_tb_d_ts_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0, t_mul_tag=True)) + s_stb_s_tb_d_ts_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0, t_add_tag=True)) + s_stb_d_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0)) + s_stb_d_tb_d_ts_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, t_mul_tag=True)) + s_stb_d_tb_d_ts_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, t_add_tag=True)) + + return d_stb_d_tb_list, s_stb_s_tb_list, s_stb_s_tb_a_tag_list, s_stb_s_tb_m_tag_list, \ + s_stb_d_tb_list, 
s_stb_d_tb_m_tag_list, s_stb_d_tb_a_tag_list, s_stb_s_tb_d_ts_list, \
+            s_stb_s_tb_d_ts_m_tag_list, s_stb_s_tb_d_ts_a_tag_list, s_stb_d_tb_d_ts_list, \
+            s_stb_d_tb_d_ts_m_tag_list, s_stb_d_tb_d_ts_a_tag_list
+
+
+    def genMultiThreadSeq(self, sql_list):
+        tlist = list()
+        for insert_sql in sql_list:
+            t = threading.Thread(target=self._conn.schemaless_insert, args=([insert_sql[0]], TDSmlProtocolType.TELNET.value, None))
+            tlist.append(t)
+        return tlist
+
+    def multiThreadRun(self, tlist):
+        for t in tlist:
+            t.start()
+        for t in tlist:
+            t.join()
+
+    def stbInsertMultiThreadCheckCase(self):
+        """
+        multiple threads insert into different stbs
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_sql = self.genSqlList()[0]
+        print(input_sql)
+        self.multiThreadRun(self.genMultiThreadSeq(input_sql))
+        tdSql.query(f"show tables;")
+        tdSql.checkRows(5)
+
+    def sStbStbDdataInsertMultiThreadCheckCase(self):
+        """
+        multiple threads insert into the same stb and tb with different data; only the first row written is kept
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        tb_name = tdCom.getLongName(7, "letters")
+        input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
+        self.resCmp(input_sql, stb_name)
+        s_stb_s_tb_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[1]
+        self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_list))
+        tdSql.query(f"show tables;")
+        tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6)
+        if self.smlChildTableName_value == "ID":
+            expected_tb_name = self.getNoIdTbName(stb_name)[0]
+            tdSql.checkEqual(tb_name, expected_tb_name)
+        tdSql.query(f"select * from {stb_name};")
+        tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6)
+
+    def sStbStbDdataAtInsertMultiThreadCheckCase(self):
+        """
+        multiple threads insert into the same stb and tb with different data, adding columns and tags; only the first row written is kept
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        tb_name = tdCom.getLongName(7, "letters")
+        input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
+        self.resCmp(input_sql, stb_name)
+        s_stb_s_tb_a_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[2]
+        self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_a_tag_list))
+        tdSql.query(f"show tables;")
+        tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6)
+        if self.smlChildTableName_value == "ID":
+            expected_tb_name = self.getNoIdTbName(stb_name)[0]
+            tdSql.checkEqual(tb_name, expected_tb_name)
+        tdSql.query(f"select * from {stb_name};")
+        tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6)
+
+    def sStbStbDdataMtInsertMultiThreadCheckCase(self):
+        """
+        multiple threads insert into the same stb and tb with different data, removing columns and tags; only the first row written is kept
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        tb_name = tdCom.getLongName(7, "letters")
+        input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
+        self.resCmp(input_sql, stb_name)
+        s_stb_s_tb_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[3]
+        self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_m_tag_list))
+        tdSql.query(f"show tables;")
+        tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(2)
+        if self.smlChildTableName_value == "ID":
+            expected_tb_name = self.getNoIdTbName(stb_name)[0]
+
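The genMultiThreadSeq/multiThreadRun pair above drives every multi-thread check case in this file: each schemaless line gets its own thread, all threads are started, and all are joined before any row count is verified. A minimal standalone sketch of that fan-out pattern, with insert_line as a hypothetical stand-in for the connection's schemaless_insert call:

import threading

def insert_line(line):
    # hypothetical stand-in for conn.schemaless_insert([line], protocol, None)
    print(f"inserting: {line}")

lines = [f"stb_{i} 1626006833640 {i}i32 t0=t" for i in range(5)]
threads = [threading.Thread(target=insert_line, args=(line,)) for line in lines]
for t in threads:
    t.start()
for t in threads:
    t.join()  # join before verifying, so `show tables` sees every insert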
tdSql.checkEqual(tb_name, expected_tb_name) + tdSql.query(f"select * from {stb_name};") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(2) + + def sStbDtbDdataInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, different data + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_list = self.genSqlList(stb_name=stb_name)[4] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def sStbDtbDdataMtInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, different data, add col, mul tag + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_m_tag_list = [(f'{stb_name} 1626006833640 "omfdhyom" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833640 "vqowydbc" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833640 "plgkckpv" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833640 "cujyqvlj" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833640 "twjxisat" t0=T t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(3) + + def sStbDtbDdataAtInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, different data, add tag, mul col + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_a_tag_list = self.genSqlList(stb_name=stb_name)[6] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_a_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def sStbStbDdataDtsInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different ts + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_d_ts_list = [(f'{stb_name} 0 "hkgjiwdj" id={tb_name} t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "rljjrrul" id={tb_name} t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="bmcanhbs" t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "basanglx" id={tb_name} t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="enqkyvmb" t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "clsajzpp" id={tb_name} t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 
t5=11.12345f32 t6=22.123456789f64 t7="eivaegjk" t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "jitwseso" id={tb_name} t0=T t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="yhlwkddq" t8=L"ncharTagValue"', 'dwpthv')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(6) + + def sStbStbDdataDtsMtInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different ts, add col, mul tag + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_d_ts_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[8] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(2) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(6) + tdSql.query(f"select * from {stb_name} where t8 is not NULL") + tdSql.checkRows(6) if self.smlChildTableName_value == "ID" else tdSql.checkRows(1) + + def sStbStbDdataDtsAtInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different ts, add tag, mul col + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_d_ts_a_tag_list = [(f'{stb_name} 0 "clummqfy" id={tb_name} t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="hpxzrdiw" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "yqeztggb" id={tb_name} t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="gdtblmrc" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "gbkinqdk" id={tb_name} t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="iqniuvco" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "ldxxejbd" id={tb_name} t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vxkipags" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "tlvzwjes" id={tb_name} t0=true t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="enwrlrtj" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_a_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(6) + for t in ["t10", "t11"]: + tdSql.query(f"select * from {stb_name} where {t} is not NULL;") + tdSql.checkRows(0) if self.smlChildTableName_value == "ID" else tdSql.checkRows(5) + + def sStbDtbDdataDtsInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, data, ts + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() 
function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name)[10] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def sStbDtbDdataDtsMtInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, data, ts, add col, mul tag + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_d_ts_m_tag_list = [(f'{stb_name} 0 "mnpmtzul" t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "zbvwckcd" t0=True t1=126i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "vymcjfwc" t0=False t1=125i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "laumkwfn" t0=False t1=124i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "nyultzxr" t0=false t1=123i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def test(self): + try: + input_sql = f'test_nchar 0 L"涛思数据" t0=f t1=L"涛思数据" t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + except SchemalessError as err: + print(err.errno) + + def runAll(self): + self.initCheckCase() + self.boolTypeCheckCase() + self.symbolsCheckCase() + self.tsCheckCase() + self.openTstbTelnetTsCheckCase() + self.idSeqCheckCase() + self.idLetterCheckCase() + self.noIdCheckCase() + self.maxColTagCheckCase() + self.stbTbNameCheckCase() + self.idStartWithNumCheckCase() + self.nowTsCheckCase() + self.dateFormatTsCheckCase() + self.illegalTsCheckCase() + self.tbnameCheckCase() + self.tagNameLengthCheckCase() + # self.tagValueLengthCheckCase() + self.colValueLengthCheckCase() + self.tagColIllegalValueCheckCase() + self.blankCheckCase() + self.duplicateIdTagColInsertCheckCase() + self.noIdStbExistCheckCase() + self.duplicateInsertExistCheckCase() + self.tagColBinaryNcharLengthCheckCase() + self.tagColAddDupIDCheckCase() + self.tagColAddCheckCase() + self.tagMd5Check() + # self.tagColNcharMaxLengthCheckCase() + # self.batchInsertCheckCase() + # self.multiInsertCheckCase(10) + self.batchErrorInsertCheckCase() + self.multiColsInsertCheckCase() + self.blankColInsertCheckCase() + self.blankTagInsertCheckCase() + self.chineseCheckCase() + self.multiFieldCheckCase() + self.spellCheckCase() + self.pointTransCheckCase() + self.defaultTypeCheckCase() + self.tbnameTagsColsNameCheckCase() + # # # MultiThreads + # self.stbInsertMultiThreadCheckCase() + # self.sStbStbDdataInsertMultiThreadCheckCase() + # self.sStbStbDdataAtInsertMultiThreadCheckCase() + # self.sStbStbDdataMtInsertMultiThreadCheckCase() + # self.sStbDtbDdataInsertMultiThreadCheckCase() + # self.sStbDtbDdataMtInsertMultiThreadCheckCase() + # self.sStbDtbDdataAtInsertMultiThreadCheckCase() + # self.sStbStbDdataDtsInsertMultiThreadCheckCase() + # # 
self.sStbStbDdataDtsMtInsertMultiThreadCheckCase()
+        # self.sStbStbDdataDtsAtInsertMultiThreadCheckCase()
+        # self.sStbDtbDdataDtsInsertMultiThreadCheckCase()
+        # self.sStbDtbDdataDtsMtInsertMultiThreadCheckCase()
+
+    def run(self):
+        print("running {}".format(__file__))
+
+        try:
+            self.createDb()
+            self.runAll()
+            # self.createDb(protocol="telnet-tcp")
+            # self.initCheckCase('telnet-tcp')
+            # self.boolTypeCheckCase('telnet-tcp')
+            # self.symbolsCheckCase('telnet-tcp')
+            # self.idSeqCheckCase('telnet-tcp')
+            # self.idLetterCheckCase('telnet-tcp')
+            # self.noIdCheckCase('telnet-tcp')
+            # self.stbTbNameCheckCase('telnet-tcp')
+            # self.idStartWithNumCheckCase('telnet-tcp')
+            # self.pointTransCheckCase('telnet-tcp')
+            # self.tcpKeywordsCheckCase()
+        except Exception as err:
+            print(''.join(traceback.format_exception(None, err, err.__traceback__)))
+            raise err
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/1-insert/test_stmt_insert_query.py b/tests/system-test/1-insert/test_stmt_insert_query.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6faedd35ee9f08e50310e5570a9be284d16ecc4
--- /dev/null
+++ b/tests/system-test/1-insert/test_stmt_insert_query.py
@@ -0,0 +1,261 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import threading as thd
+import multiprocessing as mp
+from numpy.lib.function_base import insert
+import taos
+from taos import *
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+import datetime as dt
+from datetime import datetime
+from ctypes import *
+import time
+# constant define
+WAITS = 5 # wait seconds
+
+class TDTestCase:
+    #
+    # --------------- main frame -------------------
+    def caseDescription(self):
+        '''
+        stmt interface insert and query test cases;
+        case1: stmt multi-bind insert plus parameterized query
+        case2: stmt insert with tbname/tags binding plus parameterized query
+        '''
+        return
+
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root)-len("/build/bin")]
+                    break
+        return buildPath
+
+    # init
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor())
+        # tdSql.prepare()
+        # self.create_tables();
+        self.ts = 1500000000000
+
+    # stop
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+    # --------------- case -------------------
+
+
+    def newcon(self, host, cfg):
+        user = "root"
+        password = "taosdata"
+        port = 6030
+        con = taos.connect(host=host, user=user, password=password, config=cfg, port=port)
+        print(con)
+        return con
+
+    def test_stmt_insert_multi(self, conn):
+        # 
type: (TaosConnection) -> None + + dbname = "pytest_taos_stmt_multi" + try: + conn.execute("drop database if exists %s" % dbname) + conn.execute("create database if not exists %s" % dbname) + conn.select_db(dbname) + + conn.execute( + "create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\ + bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \ + ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)", + ) + # conn.load_table_info("log") + + start = datetime.now() + stmt = conn.statement("insert into log values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") + + params = new_multi_binds(16) + params[0].timestamp((1626861392589, 1626861392590, 1626861392591)) + params[1].bool((True, None, False)) + params[2].tinyint([-128, -128, None]) # -128 is tinyint null + params[3].tinyint([0, 127, None]) + params[4].smallint([3, None, 2]) + params[5].int([3, 4, None]) + params[6].bigint([3, 4, None]) + params[7].tinyint_unsigned([3, 4, None]) + params[8].smallint_unsigned([3, 4, None]) + params[9].int_unsigned([3, 4, None]) + params[10].bigint_unsigned([3, 4, None]) + params[11].float([3, None, 1]) + params[12].double([3, None, 1.2]) + params[13].binary(["abc", "dddafadfadfadfadfa", None]) + params[14].nchar(["涛思数据", None, "a long string with 中文字符"]) + params[15].timestamp([None, None, 1626861392591]) + # print(type(stmt)) + stmt.bind_param_batch(params) + stmt.execute() + end = datetime.now() + print("elapsed time: ", end - start) + assert stmt.affected_rows == 3 + + #query + querystmt=conn.statement("select ?,bu from log") + queryparam=new_bind_params(1) + print(type(queryparam)) + queryparam[0].binary("ts") + querystmt.bind_param(queryparam) + querystmt.execute() + result=querystmt.use_result() + rows=result.fetch_all() + print( querystmt.use_result()) + + # result = conn.query("select * from log") + # rows=result.fetch_all() + # rows=result.fetch_all() + print(rows) + assert rows[1][0] == "ts" + assert rows[0][1] == 3 + + #query + querystmt1=conn.statement("select * from log where bu < ?") + queryparam1=new_bind_params(1) + print(type(queryparam1)) + queryparam1[0].int(4) + querystmt1.bind_param(queryparam1) + querystmt1.execute() + result1=querystmt1.use_result() + rows1=result1.fetch_all() + assert str(rows1[0][0]) == "2021-07-21 17:56:32.589000" + assert rows1[0][10] == 3 + + + stmt.close() + + # conn.execute("drop database if exists %s" % dbname) + conn.close() + + except Exception as err: + # conn.execute("drop database if exists %s" % dbname) + conn.close() + raise err + + def test_stmt_set_tbname_tag(self,conn): + dbname = "pytest_taos_stmt_set_tbname_tag" + + try: + conn.execute("drop database if exists %s" % dbname) + conn.execute("create database if not exists %s PRECISION 'us' " % dbname) + conn.select_db(dbname) + conn.execute("create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\ + bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \ + ff float, dd double, bb binary(100), nn nchar(100), tt timestamp) tags (t1 timestamp, t2 bool,\ + t3 tinyint, t4 tinyint, t5 smallint, t6 int, t7 bigint, t8 tinyint unsigned, t9 smallint unsigned, \ + t10 int unsigned, t11 bigint unsigned, t12 float, t13 double, t14 binary(100), t15 nchar(100), t16 timestamp)") + + stmt = conn.statement("insert into ? using log tags (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) 
\ + values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + tags = new_bind_params(16) + tags[0].timestamp(1626861392589123, PrecisionEnum.Microseconds) + tags[1].bool(True) + tags[2].null() + tags[3].tinyint(2) + tags[4].smallint(3) + tags[5].int(4) + tags[6].bigint(5) + tags[7].tinyint_unsigned(6) + tags[8].smallint_unsigned(7) + tags[9].int_unsigned(8) + tags[10].bigint_unsigned(9) + tags[11].float(10.1) + tags[12].double(10.11) + tags[13].binary("hello") + tags[14].nchar("stmt") + tags[15].timestamp(1626861392589, PrecisionEnum.Milliseconds) + stmt.set_tbname_tags("tb1", tags) + params = new_multi_binds(16) + params[0].timestamp((1626861392589111, 1626861392590111, 1626861392591111)) + params[1].bool((True, None, False)) + params[2].tinyint([-128, -128, None]) # -128 is tinyint null + params[3].tinyint([0, 127, None]) + params[4].smallint([3, None, 2]) + params[5].int([3, 4, None]) + params[6].bigint([3, 4, None]) + params[7].tinyint_unsigned([3, 4, None]) + params[8].smallint_unsigned([3, 4, None]) + params[9].int_unsigned([3, 4, None]) + params[10].bigint_unsigned([3, 4, 5]) + params[11].float([3, None, 1]) + params[12].double([3, None, 1.2]) + params[13].binary(["abc", "dddafadfadfadfadfa", None]) + params[14].nchar(["涛思数据", None, "a long string with 中文字符"]) + params[15].timestamp([None, None, 1626861392591]) + + stmt.bind_param_batch(params) + stmt.execute() + + assert stmt.affected_rows == 3 + + #query + querystmt1=conn.statement("select * from log where bu < ?") + queryparam1=new_bind_params(1) + print(type(queryparam1)) + queryparam1[0].int(5) + querystmt1.bind_param(queryparam1) + querystmt1.execute() + result1=querystmt1.use_result() + rows1=result1.fetch_all() + assert str(rows1[0][0]) == "2021-07-21 17:56:32.589111" + assert rows1[0][10] == 3 + assert rows1[1][10] == 4 + + # conn.execute("drop database if exists %s" % dbname) + conn.close() + + except Exception as err: + # conn.execute("drop database if exists %s" % dbname) + conn.close() + raise err + + def run(self): + buildPath = self.getBuildPath() + config = buildPath+ "../sim/dnode1/cfg/" + host="localhost" + connectstmt=self.newcon(host,config) + print(connectstmt) + self.test_stmt_insert_multi(connectstmt) + connectstmt=self.newcon(host,config) + self.test_stmt_set_tbname_tag(connectstmt) + + return + + +# add case with filename +# +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/2-query/To_iso8601.py b/tests/system-test/2-query/To_iso8601.py index cd22ffb90c1fbf86e81dfabecbcb1ae0e536cd39..57bcca638ce26aace35d76707c12699fe2e8d1c4 100644 --- a/tests/system-test/2-query/To_iso8601.py +++ b/tests/system-test/2-query/To_iso8601.py @@ -95,7 +95,7 @@ class TDTestCase: # tdSql.query("select to_iso8601(-1) from ntb") tdSql.query("select to_iso8601(9223372036854775807) from ntb") tdSql.checkRows(3) - + # bug TD-14896 # tdSql.query("select to_iso8601(10000000000) from ntb") # tdSql.checkData(0,0,None) # tdSql.query("select to_iso8601(-1) from ntb") @@ -106,11 +106,6 @@ class TDTestCase: tdSql.error("select to_iso8601(1.5) from db.ntb") tdSql.error("select to_iso8601('a') from ntb") tdSql.error("select to_iso8601(c2) from ntb") - - - - - tdSql.query("select to_iso8601(now) from stb") tdSql.query("select to_iso8601(now()) from stb") tdSql.checkRows(3) @@ -126,7 +121,7 @@ class TDTestCase: tdSql.checkRows(3) tdSql.query("select to_iso8601(ts)+'a' from stb ") tdSql.checkRows(3) - # tdSql.query() + tdSql.query("select to_iso8601(today()) *null from 
stb") tdSql.checkRows(3) tdSql.checkData(0,0,None) @@ -152,7 +147,9 @@ class TDTestCase: tdSql.checkRows(3) tdSql.checkData(0,0,None) + # bug TD-14896 # tdSql.query("select to_iso8601(-1) from ntb") + # tdSql.checkRows(3) diff --git a/tests/system-test/2-query/apercentile.py b/tests/system-test/2-query/apercentile.py new file mode 100644 index 0000000000000000000000000000000000000000..150c4d3f17e30ab5f4d25fb19af2bb80ee202776 --- /dev/null +++ b/tests/system-test/2-query/apercentile.py @@ -0,0 +1,107 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.rowNum = 10 + self.ts = 1537146000000 + + def check_apercentile(self,data,expect_data,param,percent,column): + if param == "default": + if abs((expect_data-data) <= expect_data * 0.2): + tdLog.info(f"apercentile function values check success with col{column}, param = {param},percent = {percent}") + else: + tdLog.notice(f"apercentile function value has not as expected with col{column}, param = {param},percent = {percent}") + sys.exit(1) + elif param == "t-digest": + if abs((expect_data-data) <= expect_data * 0.2): + tdLog.info(f"apercentile function values check success with col{column}, param = {param},percent = {percent}") + else: + tdLog.notice(f"apercentile function value has not as expected with col{column}, param = {param},percent = {percent}") + sys.exit(1) + + def run(self): + tdSql.prepare() + + intData = [] + floatData = [] + percent_list = [0,50,100] + param_list = ['default','t-digest'] + tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') + for i in range(self.rowNum): + tdSql.execute("insert into test values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + intData.append(i + 1) + floatData.append(i + 0.1) + + # percentile verifacation + + tdSql.error("select apercentile(ts ,20) from test") + tdSql.error("select apercentile(col7 ,20) from test") + tdSql.error("select apercentile(col8 ,20) from test") + tdSql.error("select apercentile(col9 ,20) from test") + + column_list = [1,2,3,4,5,6,11,12,13,14] + + for i in column_list: + for j in percent_list: + for k in param_list: + tdSql.query(f"select apercentile(col{i},{j},'{k}') from test") + data = tdSql.getData(0, 0) + tdSql.query(f"select percentile(col{i},{j}) from test") + expect_data = tdSql.getData(0, 0) + self.check_apercentile(data,expect_data,k,j,i) + + error_param_list = [-1,101,'"a"'] + for i in error_param_list: + tdSql.error(f'select apercentile(col1,{i}) from test') + + tdSql.execute("create table meters (ts timestamp, voltage int) tags(loc 
+        tdSql.execute("create table t0 using meters tags('beijing')")
+        tdSql.execute("create table t1 using meters tags('shanghai')")
+        for i in range(self.rowNum):
+            tdSql.execute("insert into t0 values(%d, %d)" % (self.ts + i, i + 1))
+            tdSql.execute("insert into t1 values(%d, %d)" % (self.ts + i, i + 1))
+
+        column_list = ['voltage']
+        for i in column_list:
+            for j in percent_list:
+                for k in param_list:
+                    tdSql.query(f"select apercentile({i}, {j},'{k}') from t0")
+                    data = tdSql.getData(0, 0)
+                    tdSql.query(f"select percentile({i},{j}) from t0")
+                    expect_data = tdSql.getData(0, 0)
+                    self.check_apercentile(data, expect_data, k, j, i)
+                    tdSql.query(f"select apercentile({i}, {j},'{k}') from meters")
+                    tdSql.checkRows(1)
+        table_list = ["meters","t0"]
+        for i in error_param_list:
+            for j in table_list:
+                for k in column_list:
+                    tdSql.error(f'select apercentile({k},{i}) from {j}')
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/avg.py b/tests/system-test/2-query/avg.py
new file mode 100644
index 0000000000000000000000000000000000000000..20ee6df7fcf94e3b02641b735c6ad7fd1ce862ff
--- /dev/null
+++ b/tests/system-test/2-query/avg.py
@@ -0,0 +1,424 @@
+import taos
+import sys
+import time
+import datetime
+import inspect
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+
+class TDTestCase:
+    updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
+                     "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
+                     "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143}
+    def init(self, conn, logSql):
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor(), True)
+
+    def prepare_datas(self):
+        tdSql.execute(
+            '''create table stb1
+            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+            tags (t1 int)
+            '''
+        )
+
+        tdSql.execute(
+            '''
+            create table t1
+            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+            '''
+        )
+        for i in range(4):
+            tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+
+        for i in range(9):
+            tdSql.execute(
+                f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+            )
+            tdSql.execute(
+                f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+            )
+        tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+        tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+        tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+        tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+
+        tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+        tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + + tdSql.execute( + f'''insert into t1 values + ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) + ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) + ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a ) + ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a ) + ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a ) + ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a ) + ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ''' + ) + + def check_avg(self ,origin_query , check_query): + avg_result = tdSql.getResult(origin_query) + origin_result = tdSql.getResult(check_query) + + check_status = True + for row_index , row in enumerate(avg_result): + for col_index , elem in enumerate(row): + if avg_result[row_index][col_index] != origin_result[row_index][col_index]: + check_status = False + if not check_status: + tdLog.notice("avg function value has not as expected , sql is \"%s\" "%origin_query ) + sys.exit(1) + else: + tdLog.info("avg value check pass , it work as expected ,sql is \"%s\" "%check_query ) + + def test_errors(self): + error_sql_lists = [ + "select avg from t1", + # "select avg(-+--+c1) from t1", + # "select +-avg(c1) from t1", + # "select ++-avg(c1) from t1", + # "select ++--avg(c1) from t1", + # "select - -avg(c1)*0 from t1", + # "select avg(tbname+1) from t1 ", + "select avg(123--123)==1 from t1", + "select avg(c1) as 'd1' from t1", + "select avg(c1 ,c2 ) from t1", + "select avg(c1 ,NULL) from t1", + "select avg(,) from t1;", + "select avg(avg(c1) ab from t1)", + "select avg(c1) as int from t1", + "select avg from stb1", + # "select avg(-+--+c1) from stb1", + # "select +-avg(c1) from stb1", + # "select ++-avg(c1) from stb1", + # "select ++--avg(c1) from stb1", + # "select - -avg(c1)*0 from stb1", + # "select avg(tbname+1) from stb1 ", + "select avg(123--123)==1 from stb1", + "select avg(c1) as 'd1' from stb1", + "select avg(c1 ,c2 ) from stb1", + "select avg(c1 ,NULL) from stb1", + "select avg(,) from stb1;", + "select avg(avg(c1) ab from stb1)", + "select avg(c1) as int from stb1" + ] + for error_sql in error_sql_lists: + tdSql.error(error_sql) + + def support_types(self): + type_error_sql_lists = [ + "select avg(ts) from t1" , + "select avg(c7) from t1", + "select avg(c8) from t1", + "select avg(c9) from t1", + "select avg(ts) from ct1" , + "select avg(c7) from ct1", + "select avg(c8) from ct1", + "select avg(c9) from ct1", + "select avg(ts) from ct3" , + "select avg(c7) from ct3", + "select avg(c8) from ct3", + "select avg(c9) from ct3", + "select avg(ts) from ct4" , + "select avg(c7) from ct4", + "select 
avg(c8) from ct4", + "select avg(c9) from ct4", + "select avg(ts) from stb1" , + "select avg(c7) from stb1", + "select avg(c8) from stb1", + "select avg(c9) from stb1" , + + "select avg(ts) from stbbb1" , + "select avg(c7) from stbbb1", + + "select avg(ts) from tbname", + "select avg(c9) from tbname" + + ] + + for type_sql in type_error_sql_lists: + tdSql.error(type_sql) + + + type_sql_lists = [ + "select avg(c1) from t1", + "select avg(c2) from t1", + "select avg(c3) from t1", + "select avg(c4) from t1", + "select avg(c5) from t1", + "select avg(c6) from t1", + + "select avg(c1) from ct1", + "select avg(c2) from ct1", + "select avg(c3) from ct1", + "select avg(c4) from ct1", + "select avg(c5) from ct1", + "select avg(c6) from ct1", + + "select avg(c1) from ct3", + "select avg(c2) from ct3", + "select avg(c3) from ct3", + "select avg(c4) from ct3", + "select avg(c5) from ct3", + "select avg(c6) from ct3", + + "select avg(c1) from stb1", + "select avg(c2) from stb1", + "select avg(c3) from stb1", + "select avg(c4) from stb1", + "select avg(c5) from stb1", + "select avg(c6) from stb1", + + "select avg(c6) as alisb from stb1", + "select avg(c6) alisb from stb1", + ] + + for type_sql in type_sql_lists: + tdSql.query(type_sql) + + def basic_avg_function(self): + + # basic query + tdSql.query("select c1 from ct3") + tdSql.checkRows(0) + tdSql.query("select c1 from t1") + tdSql.checkRows(12) + tdSql.query("select c1 from stb1") + tdSql.checkRows(25) + + # used for empty table , ct3 is empty + tdSql.query("select avg(c1) from ct3") + tdSql.checkRows(0) + tdSql.query("select avg(c2) from ct3") + tdSql.checkRows(0) + tdSql.query("select avg(c3) from ct3") + tdSql.checkRows(0) + tdSql.query("select avg(c4) from ct3") + tdSql.checkRows(0) + tdSql.query("select avg(c5) from ct3") + tdSql.checkRows(0) + tdSql.query("select avg(c6) from ct3") + + # used for regular table + tdSql.query("select avg(c1) from t1") + tdSql.checkData(0, 0, 5.000000000) + + + tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1") + tdSql.checkData(1, 5, 1.11000) + tdSql.checkData(3, 4, 33) + tdSql.checkData(5, 5, None) + self.check_avg(" select avg(c1) , avg(c2) , avg(c3) from t1 " , " select sum(c1)/count(c1) , sum(c2)/count(c2) , sum(c3)/count(c3) from t1 ") + + # used for sub table + tdSql.query("select avg(c1) from ct1") + tdSql.checkData(0, 0, 4.846153846) + + tdSql.query("select avg(c1) from ct3") + tdSql.checkRows(0) + + self.check_avg(" select avg(abs(c1)) , avg(abs(c2)) , avg(abs(c3)) from t1 " , " select sum(abs(c1))/count(c1) , sum(abs(c2))/count(c2) , sum(abs(c3))/count(c3) from t1 ") + self.check_avg(" select avg(abs(c1)) , avg(abs(c2)) , avg(abs(c3)) from stb1 " , " select sum(abs(c1))/count(c1) , sum(abs(c2))/count(c2) , sum(abs(c3))/count(c3) from stb1 ") + + # used for stable table + + tdSql.query("select avg(c1) from stb1") + tdSql.checkRows(1) + + self.check_avg(" select avg(abs(ceil(c1))) , avg(abs(ceil(c2))) , avg(abs(ceil(c3))) from stb1 " , " select sum(abs(ceil(c1)))/count(c1) , sum(abs(ceil(c2)))/count(c2) , sum(abs(ceil(c3)))/count(c3) from stb1 ") + + # used for not exists table + tdSql.error("select avg(c1) from stbbb1") + tdSql.error("select avg(c1) from tbname") + tdSql.error("select avg(c1) from ct5") + + # mix with common col + tdSql.error("select c1, avg(c1) from ct1") + tdSql.error("select c1, avg(c1) from ct4") + + + # mix with common functions + tdSql.error("select c1, avg(c1),c5, floor(c5) from ct4 ") + tdSql.error("select c1, avg(c1),c5, floor(c5) from stb1 ") + + # mix with agg functions , 
not support + tdSql.error("select c1, avg(c1),c5, count(c5) from stb1 ") + tdSql.error("select c1, avg(c1),c5, count(c5) from ct1 ") + tdSql.error("select c1, count(c5) from ct1 ") + tdSql.error("select c1, count(c5) from stb1 ") + + # agg functions mix with agg functions + + tdSql.query(" select max(c5), count(c5) , avg(c5) from stb1 ") + tdSql.checkData(0, 0, 8.88000 ) + tdSql.checkData(0, 1, 22 ) + tdSql.checkData(0, 2, 2.270454591 ) + + tdSql.query(" select max(c5), count(c5) , avg(c5) ,elapsed(ts) , spread(c1) from ct1; ") + tdSql.checkData(0, 0, 8.88000 ) + tdSql.checkData(0, 1, 13 ) + tdSql.checkData(0, 2, 0.768461603 ) + + # bug fix for count + tdSql.query("select count(c1) from ct4 ") + tdSql.checkData(0,0,9) + tdSql.query("select count(*) from ct4 ") + tdSql.checkData(0,0,12) + tdSql.query("select count(c1) from stb1 ") + tdSql.checkData(0,0,22) + tdSql.query("select count(*) from stb1 ") + tdSql.checkData(0,0,25) + + # bug fix for compute + tdSql.error("select c1, avg(c1) -0 ,ceil(c1)-0 from ct4 ") + tdSql.error(" select c1, avg(c1) -0 ,avg(ceil(c1-0.1))-0.1 from ct4") + + # mix with nest query + self.check_avg("select avg(col) from (select abs(c1) col from stb1)" , "select avg(abs(c1)) from stb1") + self.check_avg("select avg(col) from (select ceil(abs(c1)) col from stb1)" , "select avg(abs(c1)) from stb1") + + tdSql.query(" select abs(avg(abs(abs(c1)))) from stb1 ") + tdSql.checkData(0, 0, 4.500000000) + tdSql.query(" select abs(avg(abs(abs(c1)))) from t1 ") + tdSql.checkData(0, 0, 5.000000000) + + tdSql.query(" select abs(avg(abs(abs(c1)))) from stb1 ") + tdSql.checkData(0, 0, 4.500000000) + + tdSql.query(" select avg(c1) from stb1 where c1 is null ") + tdSql.checkRows(0) + + + def avg_func_filter(self): + tdSql.execute("use db") + tdSql.query(" select avg(c1), avg(c1) -0 ,avg(ceil(c1-0.1))-0 ,avg(floor(c1+0.1))-0.1 ,avg(ceil(log(c1,2)-0.5)) from ct4 where c1>5 ") + tdSql.checkRows(1) + tdSql.checkData(0,0,7.000000000) + tdSql.checkData(0,1,7.000000000) + tdSql.checkData(0,2,7.000000000) + tdSql.checkData(0,3,6.900000000) + tdSql.checkData(0,4,3.000000000) + + tdSql.query("select avg(c1), avg(c1) -0 ,avg(ceil(c1-0.1))-0 ,avg(floor(c1+0.1))-0.1 ,avg(ceil(log(c1,2)-0.5)) from ct4 where c1=5 ") + tdSql.checkRows(1) + tdSql.checkData(0,0,5.000000000) + tdSql.checkData(0,1,5.000000000) + tdSql.checkData(0,2,5.000000000) + tdSql.checkData(0,3,4.900000000) + tdSql.checkData(0,4,2.000000000) + + tdSql.query("select avg(c1) ,avg(c2) , avg(c1) -0 , avg(ceil(c1-0.1))-0 ,avg(floor(c1+0.1))-0.1 ,avg(ceil(log(c1,2))-0.5) from ct4 where c1>log(c1,2) limit 1 ") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 4.500000000) + tdSql.checkData(0, 1, 49999.500000000) + tdSql.checkData(0, 5, 1.625000000) + + def avg_Arithmetic(self): + pass + + def check_boundary_values(self): + + tdSql.execute("drop database if exists bound_test") + tdSql.execute("create database if not exists bound_test") + time.sleep(3) + tdSql.execute("use bound_test") + tdSql.execute( + "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + ) + tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute( + f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + tdSql.execute( + f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, 
-1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + tdSql.execute( + f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.execute( + f"insert into sub1_bound values ( now(), 2147483645, 9223372036854775805, 32765, 125, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.execute( + f"insert into sub1_bound values ( now(), 2147483644, 9223372036854775804, 32764, 124, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.execute( + f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + tdSql.execute( + f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + + tdSql.error( + f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + self.check_avg("select avg(c1), avg(c2), avg(c3) , avg(c4), avg(c5) ,avg(c6) from sub1_bound " , " select sum(c1)/count(c1), sum(c2)/count(c2) ,sum(c3)/count(c3), sum(c4)/count(c4), sum(c5)/count(c5) ,sum(c6)/count(c6) from sub1_bound ") + + + # check basic elem for table per row + tdSql.query("select avg(c1) ,avg(c2) , avg(c3) , avg(c4), avg(c5), avg(c6) from sub1_bound ") + tdSql.checkRows(1) + tdSql.checkData(0,0,920350133.571428537) + tdSql.checkData(0,1,1.3176245766935393e+18) + tdSql.checkData(0,2,14042.142857143) + tdSql.checkData(0,3,53.571428571) + tdSql.checkData(0,4,5.828571332045761e+37) + # tdSql.checkData(0,5,None) + + + # check + - * / in functions + tdSql.query(" select avg(c1+1) ,avg(c2) , avg(c3*1) , avg(c4/2), avg(c5)/2, avg(c6) from sub1_bound ") + tdSql.checkData(0,0,920350134.5714285) + tdSql.checkData(0,1,1.3176245766935393e+18) + tdSql.checkData(0,2,14042.142857143) + tdSql.checkData(0,3,26.785714286) + tdSql.checkData(0,4,2.9142856660228804e+37) + # tdSql.checkData(0,5,None) + + + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table ==============") + + self.prepare_datas() + + tdLog.printNoPrefix("==========step2:test errors ==============") + + self.test_errors() + + tdLog.printNoPrefix("==========step3:support types ============") + + self.support_types() + + tdLog.printNoPrefix("==========step4: avg basic query ============") + + self.basic_avg_function() + + tdLog.printNoPrefix("==========step5: avg boundary query ============") + + self.check_boundary_values() + + tdLog.printNoPrefix("==========step6: avg filter query ============") + + self.avg_func_filter() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/between.py b/tests/system-test/2-query/between.py index 3b9465dd263cc6774fdf580630bb578629e4ce8b..44750abd4648260ceb68ba03239cb128e4eaf53b 100644 --- a/tests/system-test/2-query/between.py +++ b/tests/system-test/2-query/between.py @@ -175,16 +175,17 @@ class TDTestCase: tdLog.printNoPrefix("==========step10:invalid query type") - tdSql.query("select * from supt where location between 'beijing' and 'shanghai'") - tdSql.checkRows(23) - # 非0值均解析为1,因此"between 负值 and o"解析为"between 1 and 0" - tdSql.query("select * 
from supt where isused between 0 and 1")
-        tdSql.checkRows(23)
-        tdSql.query("select * from supt where isused between -1 and 0")
-        tdSql.checkRows(0)
-        tdSql.error("select * from supt where isused between false and true")
-        tdSql.query("select * from supt where family between '拖拉机' and '自行车'")
-        tdSql.checkRows(23)
+        # TODO tag is not finished
+        # tdSql.query("select * from supt where location between 'beijing' and 'shanghai'")
+        # tdSql.checkRows(23)
+        # # any non-zero value is parsed as 1, so "between <negative> and 0" is parsed as "between 1 and 0"
+        # tdSql.query("select * from supt where isused between 0 and 1")
+        # tdSql.checkRows(23)
+        # tdSql.query("select * from supt where isused between -1 and 0")
+        # tdSql.checkRows(0)
+        # tdSql.error("select * from supt where isused between false and true")
+        # tdSql.query("select * from supt where family between '拖拉机' and '自行车'")
+        # tdSql.checkRows(23)
 
         tdLog.printNoPrefix("==========step11:query HEX/OCT/BIN type")
diff --git a/tests/system-test/2-query/bottom.py b/tests/system-test/2-query/bottom.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4390372dfa13ae4d6db6e545fc472b0395aed53
--- /dev/null
+++ b/tests/system-test/2-query/bottom.py
@@ -0,0 +1,106 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor())
+
+        self.rowNum = 10
+        self.ts = 1537146000000
+
+    def run(self):
+        tdSql.prepare()
+
+        tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
+        tdSql.execute("create table test1 using test tags('beijing')")
+        for i in range(self.rowNum):
+            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+
+        # bottom verification
+        tdSql.error("select bottom(ts, 10) from test")
+        tdSql.error("select bottom(col1, 0) from test")
+        tdSql.error("select bottom(col1, 101) from test")
+        tdSql.error("select bottom(col2, 0) from test")
+        tdSql.error("select bottom(col2, 101) from test")
+        tdSql.error("select bottom(col3, 0) from test")
+        tdSql.error("select bottom(col3, 101) from test")
+        tdSql.error("select bottom(col4, 0) from test")
+        tdSql.error("select bottom(col4, 101) from test")
+        tdSql.error("select bottom(col5, 0) from test")
+        tdSql.error("select bottom(col5, 101) from test")
+        tdSql.error("select bottom(col6, 0) from test")
+        tdSql.error("select bottom(col6, 101) from test")
+        tdSql.error("select bottom(col7, 10) from test")
+        tdSql.error("select bottom(col8, 10) from test")
+        tdSql.error("select bottom(col9, 10) from test")
+
+        tdSql.query("select bottom(col1, 2) from test")
+        tdSql.checkRows(2)
+
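bottom(col, k) returns the k smallest values of a column; with col1 holding 1 through 10 the two smallest are 1 and 2, which is what the checkEqual assertions that follow pin down. A rough sketch of the expected result shape, assuming the descending order the test observes:

rows = list(range(1, 11))      # col1 values inserted above
k = 2
bottom_k = sorted(rows)[:k]    # -> [1, 2]
expected = [(v,) for v in sorted(bottom_k, reverse=True)]
print(expected)                # -> [(2,), (1,)]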
tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) + tdSql.query("select bottom(col2, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) + + tdSql.query("select bottom(col3, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) + + tdSql.query("select bottom(col4, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) + + tdSql.query("select bottom(col11, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) + + tdSql.query("select bottom(col12, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) + + tdSql.query("select bottom(col13, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) + + tdSql.query("select bottom(col13,50) from test") + tdSql.checkRows(10) + + tdSql.query("select bottom(col14, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) + tdSql.query("select ts,bottom(col1, 2) from test1") + tdSql.checkRows(2) + tdSql.query("select ts,bottom(col1, 2),ts from test group by tbname") + tdSql.checkRows(2) + + tdSql.query('select bottom(col2,1) from test interval(1y) order by col2') + tdSql.checkData(0,0,1) + + + tdSql.error('select * from test where bottom(col2,1)=1') + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/check_tsdb.py b/tests/system-test/2-query/check_tsdb.py new file mode 100644 index 0000000000000000000000000000000000000000..33bf351207ebeacbfea514c2733700656e757d55 --- /dev/null +++ b/tests/system-test/2-query/check_tsdb.py @@ -0,0 +1,106 @@ +import taos +import sys +import datetime +import inspect + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , + "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, + "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143} + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), True) + + def prepare_datas(self): + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + for i in range(9): + tdSql.execute( + f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute( + f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert 
into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + + tdSql.execute( + f'''insert into t1 values + ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) + ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) + ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a ) + ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a ) + ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a ) + ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a ) + ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ''' + ) + + + def restart_taosd_query_sum(self): + + for i in range(5): + tdLog.info(" this is %d_th restart taosd " %i) + os.system("taos -s ' use db ;select c6 from stb1 ; '") + tdSql.execute("use db ") + tdSql.query("select count(*) from stb1") + tdSql.checkRows(1) + tdSql.query("select sum(c1),sum(c2),sum(c3),sum(c4),sum(c5),sum(c6) from stb1;") + tdSql.checkData(0,0,99) + tdSql.checkData(0,1,499995) + tdSql.checkData(0,2,4995) + tdSql.checkData(0,3,594) + tdSql.checkData(0,4,49.950001001) + tdSql.checkData(0,5,599.940000000) + tdDnodes.stop(1) + tdDnodes.start(1) + time.sleep(2) + + + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table ==============") + + self.prepare_datas() + + os.system("taos -s ' select c6 from stb1 ; '") + self.restart_taosd_query_sum() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/concat.py b/tests/system-test/2-query/concat.py index 1167b444d2eb6f753a5d662586afb0dfe30dff0b..59fae9b59d62599e3bca23c393ecc854aed9c186 100644 --- a/tests/system-test/2-query/concat.py +++ b/tests/system-test/2-query/concat.py @@ -36,19 +36,19 @@ class TDTestCase: concat_condition.extend( ( char_col, - f"upper( {char_col} )", + # f"upper( {char_col} )", ) ) concat_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL) concat_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL ) - concat_condition.extend( f"cast( 
{bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) - concat_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + # concat_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + # concat_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) concat_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) # concat_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) concat_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL ) for num_col in NUM_COL: - concat_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + # concat_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) concat_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL) concat_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL ) @@ -96,7 +96,6 @@ class TDTestCase: [ tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ] - def __concat_err_check(self,tbname): sqls = [] @@ -139,7 +138,11 @@ class TDTestCase: def __test_current(self): # sourcery skip: use-itertools-product tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [ + "ct1", + "ct2", + "ct4", + ] for tb in tbname: for i in range(2,8): self.__concat_check(tb,i) @@ -147,7 +150,10 @@ class TDTestCase: def __test_error(self): tdLog.printNoPrefix("==========err sql condition check , must return error==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [ + "t1", + "stb1", + ] for tb in tbname: for errsql in self.__concat_err_check(tb): diff --git a/tests/system-test/2-query/concat2.py b/tests/system-test/2-query/concat2.py new file mode 100644 index 0000000000000000000000000000000000000000..717766e7ffcaafcc164cc1519d0a3a657d5e387c --- /dev/null +++ b/tests/system-test/2-query/concat2.py @@ -0,0 +1,293 @@ +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + + +PRIMARY_COL = "ts" + +INT_COL = "c1" +BINT_COL = "c2" +SINT_COL = "c3" +TINT_COL = "c4" +FLOAT_COL = "c5" +DOUBLE_COL = "c6" +BOOL_COL = "c7" + +BINARY_COL = "c8" +NCHAR_COL = "c9" +TS_COL = "c10" + +NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [ BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [ BOOL_COL, ] +TS_TYPE_COL = [ TS_COL, ] + + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def __concat_condition(self): # sourcery skip: extract-method + concat_condition = [] + for char_col in CHAR_COL: + concat_condition.extend( + ( + char_col, + # f"upper( {char_col} )", + ) + ) + concat_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL) + concat_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL ) + # concat_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + # concat_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + concat_condition.extend( f"cast( {ts_col} as 
binary(16) )" for ts_col in TS_TYPE_COL ) + # concat_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) + concat_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL ) + + for num_col in NUM_COL: + # concat_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + concat_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL) + + concat_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL ) + + concat_condition.append('''"test1234!@#$%^&*():'> 0 " + return "" + + def __concat_num(self, concat_lists, num): + return [ concat_lists[i] for i in range(num) ] + + + def __group_condition(self, col, having = ""): + return f" group by {col} having {having}" if having else f" group by {col} " + + def __concat_check(self, tbname, num): + concat_condition = self.__concat_condition() + for i in range(len(concat_condition) - num + 1 ): + condition = self.__concat_num(concat_condition[i:], num) + concat_filter = f"concat( {','.join( condition ) }) " + where_condition = self.__where_condition(condition[0]) + # group_having = self.__group_condition(condition[0], having=f"{condition[0]} is not null " ) + concat_group_having = self.__group_condition(concat_filter, having=f"{concat_filter} is not null " ) + # group_no_having= self.__group_condition(condition[0] ) + concat_group_no_having= self.__group_condition(concat_filter) + groups = ["", concat_group_having, concat_group_no_having] + + if num > 8 or num < 2 : + [tdSql.error(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ] + break + + tdSql.query(f"select {','.join(condition)} from {tbname} ") + rows = tdSql.queryRows + concat_data = [] + for m in range(rows): + concat_data.append("".join(tdSql.queryResult[m])) if tdSql.getData(m, 0) else concat_data.append(None) + tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} ") + tdSql.checkRows(rows) + for j in range(tdSql.queryRows): + assert tdSql.getData(j, 0) in concat_data + + [ tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ] + + + def __concat_err_check(self,tbname): + sqls = [] + + for char_col in CHAR_COL: + sqls.extend( + ( + f"select concat( {char_col} ) from {tbname} ", + f"select concat(ceil( {char_col} )) from {tbname} ", + f"select {char_col} from {tbname} group by concat( {char_col} ) ", + ) + ) + + sqls.extend( f"select concat( {char_col} , {num_col} ) from {tbname} " for num_col in NUM_COL ) + sqls.extend( f"select concat( {char_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat( {char_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select concat( {ts_col}, {bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL ) + sqls.extend( f"select concat( {num_col} , {ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL) + sqls.extend( f"select concat( {num_col} , {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL) + sqls.extend( f"select concat( {num_col} , {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL) + sqls.extend( f"select concat( {ts_col}, {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for 
ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat( {bool_col}, {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select concat( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL ) + sqls.extend( f"select concat({char_col}, 11) from {tbname} " for char_col in CHAR_COL ) + sqls.extend( f"select concat({num_col}, '1') from {tbname} " for num_col in NUM_COL ) + sqls.extend( f"select concat({ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat({bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL ) + sqls.extend( f"select concat({char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL ) + sqls.extend( + ( + f"select concat() from {tbname} ", + f"select concat(*) from {tbname} ", + f"select concat(ccccccc) from {tbname} ", + f"select concat(111) from {tbname} ", + ) + ) + + return sqls + + def __test_current(self): # sourcery skip: use-itertools-product + tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") + tbname = [ + "t1", + "stb1", + ] + for tb in tbname: + for i in range(2,8): + self.__concat_check(tb,i) + tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========") + + def __test_error(self): + tdLog.printNoPrefix("==========err sql condition check , must return error==========") + tbname = [ + "ct1", + "ct4", + ] + + for tb in tbname: + for errsql in self.__concat_err_check(tb): + tdSql.error(sql=errsql) + self.__concat_check(tb,1) + self.__concat_check(tb,9) + tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") + + + def all_test(self): + self.__test_current() + self.__test_error() + + + def __create_tb(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { 
now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/concat_ws.py 
b/tests/system-test/2-query/concat_ws.py index 876a1c88055b0ab3ca3b1046d180365fc089ae0d..2c179b97ce0757670f31498c4dfa3926018854d9 100644 --- a/tests/system-test/2-query/concat_ws.py +++ b/tests/system-test/2-query/concat_ws.py @@ -36,22 +36,22 @@ class TDTestCase: concat_ws_condition.extend( ( char_col, - f"upper( {char_col} )", + # f"upper( {char_col} )", ) ) concat_ws_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL) concat_ws_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL ) - concat_ws_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) - concat_ws_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + # concat_ws_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + # concat_ws_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) concat_ws_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) # concat_ws_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) concat_ws_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL ) for num_col in NUM_COL: - concat_ws_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + # concat_ws_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) concat_ws_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL) - concat_ws_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL ) + # concat_ws_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL ) concat_ws_condition.append('''"test1234!@#$%^&*():'> 0 " + return "" + + def __concat_ws_num(self, concat_ws_lists, num): + return [ concat_ws_lists[i] for i in range(num) ] + + + def __group_condition(self, col, having = ""): + return f" group by {col} having {having}" if having else f" group by {col} " + + def __concat_ws_check(self, tbname, num): + concat_ws_condition = self.__concat_ws_condition() + for i in range(len(concat_ws_condition) - num + 1 ): + condition = self.__concat_ws_num(concat_ws_condition[i:], num) + concat_ws_filter = f"concat_ws('_', {','.join( condition ) }) " + where_condition = self.__where_condition(condition[0]) + # group_having = self.__group_condition(condition[0], having=f"{condition[0]} is not null " ) + concat_ws_group_having = self.__group_condition(concat_ws_filter, having=f"{concat_ws_filter} is not null " ) + # group_no_having= self.__group_condition(condition[0] ) + concat_ws_group_no_having= self.__group_condition(concat_ws_filter) + groups = ["", concat_ws_group_having, concat_ws_group_no_having] + + if num > 8 or num < 2 : + [tdSql.error(f"select concat_ws('_', {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ] + break + + tdSql.query(f"select {','.join(condition)} from {tbname} ") + rows = tdSql.queryRows + concat_ws_data = [] + for m in range(rows): + concat_ws_data.append("_".join(tdSql.queryResult[m])) if tdSql.getData(m, 0) else concat_ws_data.append(None) + tdSql.query(f"select concat_ws('_', {','.join( condition ) }) from {tbname} ") + tdSql.checkRows(rows) + for j in range(tdSql.queryRows): + assert 
tdSql.getData(j, 0) in concat_ws_data + + [ tdSql.query(f"select concat_ws('_', {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ] + + + def __concat_ws_err_check(self,tbname): + sqls = [] + + for char_col in CHAR_COL: + sqls.extend( + ( + f"select concat_ws('_', {char_col} ) from {tbname} ", + f"select concat_ws('_', ceil( {char_col} )) from {tbname} ", + f"select {char_col} from {tbname} group by concat_ws('_', {char_col} ) ", + ) + ) + + sqls.extend( f"select concat_ws('_', {char_col} , {num_col} ) from {tbname} " for num_col in NUM_COL ) + sqls.extend( f"select concat_ws('_', {char_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat_ws('_', {char_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select concat_ws('_', {ts_col}, {bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL ) + sqls.extend( f"select concat_ws('_', {num_col} , {ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL) + sqls.extend( f"select concat_ws('_', {num_col} , {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL) + sqls.extend( f"select concat_ws('_', {num_col} , {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL) + sqls.extend( f"select concat_ws('_', {ts_col}, {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat_ws('_', {bool_col}, {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select concat_ws('_', {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL ) + sqls.extend( f"select concat_ws('_', {char_col}, 11) from {tbname} " for char_col in CHAR_COL ) + sqls.extend( f"select concat_ws('_', {num_col}, '1') from {tbname} " for num_col in NUM_COL ) + sqls.extend( f"select concat_ws('_', {ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat_ws('_', {bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL ) + sqls.extend( f"select concat_ws('_', {char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL ) + sqls.extend( + ( + f"select concat_ws('_', ) from {tbname} ", + f"select concat_ws('_', *) from {tbname} ", + f"select concat_ws('_', ccccccc) from {tbname} ", + f"select concat_ws('_', 111) from {tbname} ", + ) + ) + + return sqls + + def __test_current(self): # sourcery skip: use-itertools-product + tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") + tbname = [ + "ct1", + "ct2", + "ct4", + ] + for tb in tbname: + for i in range(2,8): + self.__concat_ws_check(tb,i) + tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========") + + def __test_error(self): + tdLog.printNoPrefix("==========err sql condition check , must return error==========") + tbname = [ + "t1", + "stb1" + ] + + for tb in tbname: + for errsql in self.__concat_ws_err_check(tb): + tdSql.error(sql=errsql) + self.__concat_ws_check(tb,1) + self.__concat_ws_check(tb,9) + tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") + + + def all_test(self): + self.__test_current() + self.__test_error() + + + def __create_tb(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, 
{TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) 
+ ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/csum.py b/tests/system-test/2-query/csum.py new file mode 100644 index 0000000000000000000000000000000000000000..a331311fd2e841da5fd4f6da86ccb27834fcbc69 --- /dev/null +++ b/tests/system-test/2-query/csum.py @@ -0,0 +1,428 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import subprocess
+import random
+import math
+import numpy as np
+import inspect
+import re
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor())
+
+    def csum_query_form(self, col="c1", alias="", table_expr="t1", condition=""):
+
+        '''
+        csum function:
+        :param col: string, column name, required parameter;
+        :param alias: string, an alias for the result column, or an extra function/column appended to the select list;
+        :param table_expr: string or expression, the data source (e.g. a table/stable name or a result set), required parameter;
+        :param condition: expression;
+        :param args: other functions, like ', last(col)', or an alias for the result column, like 'c2'
+        :return: csum query statement, default: select csum(c1) from t1
+        '''
+
+        return f"select csum({col}) {alias} from {table_expr} {condition}"
+
+    def checkcsum(self, col="c1", alias="", table_expr="t1", condition=""):
+        line = sys._getframe().f_back.f_lineno
+        pre_sql = self.csum_query_form(
+            col=col, table_expr=table_expr, condition=condition
+        ).replace("csum", "count")
+        tdSql.query(pre_sql)
+
+        if tdSql.queryRows == 0:
+            tdSql.query(self.csum_query_form(
+                col=col, alias=alias, table_expr=table_expr, condition=condition
+            ))
+            print(f"case in {line}: ", end='')
+            tdSql.checkRows(0)
+            return
+
+        if "order by tbname" in condition:
+            tdSql.error(self.csum_query_form(
+                col=col, alias=alias, table_expr=table_expr, condition=condition
+            ))
+            return
+
+        if "group" in condition:
+
+            tb_condition = condition.split("group by")[1].split(" ")[1]
+            tdSql.query(f"select distinct {tb_condition} from {table_expr}")
+            query_result = tdSql.queryResult
+            query_rows = tdSql.queryRows
+            clear_condition = re.sub('order by [0-9a-z]*|slimit [0-9]*|soffset [0-9]*', "", condition)
+
+            pre_row = 0
+            for i in range(query_rows):
+                group_name = query_result[i][0]
+                if "where" in clear_condition:
+                    pre_condition = re.sub('group by [0-9a-z]*', f"{tb_condition}='{group_name}'", clear_condition)
+                else:
+                    pre_condition = "where " + re.sub('group by [0-9a-z]*', f"{tb_condition}='{group_name}'", clear_condition)
+
+                tdSql.query(f"select {col} {alias} from {table_expr} {pre_condition}")
+                pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+                print("data is ", pre_data)
+                pre_csum = np.cumsum(pre_data)
+                tdSql.query(self.csum_query_form(
+                    col=col, alias=alias, table_expr=table_expr, condition=condition
+                ))
+                for j in range(len(pre_csum)):
+                    print(f"case in {line}:", end='')
+                    tdSql.checkData(pre_row + j, 1, pre_csum[j])
+                pre_row += len(pre_csum)
+            return
+        elif "union" in condition:
+            union_sql_0 = self.csum_query_form(
+                col=col, alias=alias, table_expr=table_expr, condition=condition
+            ).split("union all")[0]
+
+            union_sql_1 = self.csum_query_form(
+                col=col, alias=alias, table_expr=table_expr, condition=condition
+            ).split("union all")[1]
+
+            tdSql.query(union_sql_0)
+            union_csum_0 = tdSql.queryResult
+            row_union_0 = tdSql.queryRows
+
+            tdSql.query(union_sql_1)
+            union_csum_1 = tdSql.queryResult
+
+            tdSql.query(self.csum_query_form(
+                col=col, alias=alias, table_expr=table_expr, condition=condition
+            ))
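+            # note: the combined union-all result should hold the csum rows of the
+            # first sub-select followed by those of the second, so each half is
+            # checked against its own reference result in the loop below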
for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + if i < row_union_0: + tdSql.checkData(i, 0, union_csum_0[i][0]) + else: + tdSql.checkData(i, 0, union_csum_1[i-row_union_0][0]) + return + + else: + tdSql.query(f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0 + pre_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + pre_csum = np.cumsum(pre_result)[offset_val:] + tdSql.query(self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + + for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + if pre_csum[i] >1.7e+308 or pre_csum[i] < -1.7e+308: + continue + else: + tdSql.checkData(i, 0, pre_csum[i]) + + pass + + def csum_current_query(self) : + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1~6: numeric col:int/bigint/tinyint/smallint/float/double + self.checkcsum() + case2 = {"col": "c2"} + self.checkcsum(**case2) + case3 = {"col": "c5"} + self.checkcsum(**case3) + case4 = {"col": "c7"} + self.checkcsum(**case4) + case5 = {"col": "c8"} + self.checkcsum(**case5) + case6 = {"col": "c9"} + self.checkcsum(**case6) + + # case7~8: nested query + # case7 = {"table_expr": "(select c1 from stb1)"} + # self.checkcsum(**case7) + # case8 = {"table_expr": "(select csum(c1) c1 from stb1 group by tbname)"} + # self.checkcsum(**case8) + + # case9~10: mix with tbname/ts/tag/col + # case9 = {"alias": ", tbname"} + # self.checkcsum(**case9) + # case10 = {"alias": ", _c0"} + # self.checkcsum(**case10) + # case11 = {"alias": ", st1"} + # self.checkcsum(**case11) + # case12 = {"alias": ", c1"} + # self.checkcsum(**case12) + + # case13~15: with single condition + case13 = {"condition": "where c1 <= 10"} + self.checkcsum(**case13) + case14 = {"condition": "where c6 in (0, 1)"} + self.checkcsum(**case14) + case15 = {"condition": "where c1 between 1 and 10"} + self.checkcsum(**case15) + + # case16: with multi-condition + case16 = {"condition": "where c6=1 or c6 =0"} + self.checkcsum(**case16) + + # case17: only support normal table join + case17 = { + "col": "t1.c1", + "table_expr": "t1, t2", + "condition": "where t1.ts=t2.ts" + } + self.checkcsum(**case17) + # case18~19: with group by + # case18 = { + # "table_expr": "t1", + # "condition": "group by c6" + # } + # self.checkcsum(**case18) + # case19 = { + # "table_expr": "stb1", + # "condition": "partition by tbname" # partition by tbname + # } + # self.checkcsum(**case19) + + # # case20~21: with order by + # case20 = {"condition": "order by ts"} + # self.checkcsum(**case20) + + # # case22: with union + # case22 = { + # "condition": "union all select csum(c1) from t2" + # } + # self.checkcsum(**case22) + + # case23: with limit/slimit + case23 = { + "condition": "limit 1" + } + self.checkcsum(**case23) + # case24 = { + # "table_expr": "stb1", + # "condition": "group by tbname slimit 1 soffset 1" + # } + # self.checkcsum(**case24) + + pass + + def csum_error_query(self) -> None : + # unusual test + # + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + # + # form test + tdSql.error(self.csum_query_form(col="")) # no col + tdSql.error("csum(c1) from stb1") # no select + tdSql.error("select csum from t1") # no csum condition + 
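+        # the next few checks exercise malformed csum call syntax:
+        # wrong bracket style, missing FROM clause, missing table_expr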
tdSql.error("select csum c1 from t1") # no brackets + tdSql.error("select csum(c1) t1") # no from + tdSql.error("select csum( c1 ) from ") # no table_expr + # tdSql.error(self.csum_query_form(col="st1")) # tag col + tdSql.error(self.csum_query_form(col=1)) # col is a value + tdSql.error(self.csum_query_form(col="'c1'")) # col is a string + tdSql.error(self.csum_query_form(col=None)) # col is NULL 1 + tdSql.error(self.csum_query_form(col="NULL")) # col is NULL 2 + tdSql.error(self.csum_query_form(col='""')) # col is "" + tdSql.error(self.csum_query_form(col='c%')) # col is spercial char 1 + tdSql.error(self.csum_query_form(col='c_')) # col is spercial char 2 + tdSql.error(self.csum_query_form(col='c.')) # col is spercial char 3 + tdSql.error(self.csum_query_form(col='c3')) # timestamp col + tdSql.error(self.csum_query_form(col='ts')) # Primary key + tdSql.error(self.csum_query_form(col='avg(c1)')) # expr col + tdSql.error(self.csum_query_form(col='c6')) # bool col + tdSql.error(self.csum_query_form(col='c4')) # binary col + tdSql.error(self.csum_query_form(col='c10')) # nachr col + tdSql.error(self.csum_query_form(col='c10')) # not table_expr col + tdSql.error(self.csum_query_form(col='t1')) # tbname + tdSql.error(self.csum_query_form(col='stb1')) # stbname + tdSql.error(self.csum_query_form(col='db')) # datbasename + tdSql.error(self.csum_query_form(col=True)) # col is BOOL 1 + tdSql.error(self.csum_query_form(col='True')) # col is BOOL 2 + tdSql.error(self.csum_query_form(col='*')) # col is all col + tdSql.error("select csum[c1] from t1") # sql form error 1 + tdSql.error("select csum{c1} from t1") # sql form error 2 + tdSql.error(self.csum_query_form(col="[c1]")) # sql form error 3 + # tdSql.error(self.csum_query_form(col="c1, c2")) # sql form error 3 + # tdSql.error(self.csum_query_form(col="c1, 2")) # sql form error 3 + tdSql.error(self.csum_query_form(alias=", count(c1)")) # mix with aggregate function 1 + tdSql.error(self.csum_query_form(alias=", avg(c1)")) # mix with aggregate function 2 + tdSql.error(self.csum_query_form(alias=", min(c1)")) # mix with select function 1 + tdSql.error(self.csum_query_form(alias=", top(c1, 5)")) # mix with select function 2 + tdSql.error(self.csum_query_form(alias=", spread(c1)")) # mix with calculation function 1 + tdSql.error(self.csum_query_form(alias=", diff(c1)")) # mix with calculation function 2 + # tdSql.error(self.csum_query_form(alias=" + 2")) # mix with arithmetic 1 + tdSql.error(self.csum_query_form(alias=" + avg(c1)")) # mix with arithmetic 2 + tdSql.error(self.csum_query_form(alias=", c2")) # mix with other 1 + # tdSql.error(self.csum_query_form(table_expr="stb1")) # select stb directly + stb_join = { + "col": "stb1.c1", + "table_expr": "stb1, stb2", + "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" + } + tdSql.error(self.csum_query_form(**stb_join)) # stb join + interval_sql = { + "condition": "where ts>0 and ts < now interval(1h) fill(next)" + } + tdSql.error(self.csum_query_form(**interval_sql)) # interval + group_normal_col = { + "table_expr": "t1", + "condition": "group by c6" + } + tdSql.error(self.csum_query_form(**group_normal_col)) # group by normal col + slimit_soffset_sql = { + "table_expr": "stb1", + "condition": "group by tbname slimit 1 soffset 1" + } + # tdSql.error(self.csum_query_form(**slimit_soffset_sql)) + order_by_tbname_sql = { + "table_expr": "stb1", + "condition": "group by tbname order by tbname" + } + tdSql.error(self.csum_query_form(**order_by_tbname_sql)) + + pass + + def 
csum_test_data(self, tbnum:int, data_row:int, basetime:int) -> None : + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def csum_test_table(self,tbnum: int) -> None : + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + pass + + def csum_test_run(self) : + tdLog.printNoPrefix("==========TD-10594==========") + tbnum = 10 + nowtime = int(round(time.time() * 1000)) + per_table_rows = 2 + self.csum_test_table(tbnum) + + tdLog.printNoPrefix("######## no data test:") + self.csum_current_query() + self.csum_error_query() + + tdLog.printNoPrefix("######## insert only NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + self.csum_current_query() + self.csum_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") + self.csum_test_table(tbnum) + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + self.csum_current_query() + self.csum_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") + self.csum_test_table(tbnum) + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") + self.csum_current_query() + self.csum_error_query() + + tdLog.printNoPrefix("######## insert data without NULL data test:") + self.csum_test_table(tbnum) + self.csum_test_data(tbnum, per_table_rows, nowtime) + self.csum_current_query() + self.csum_error_query() + + + tdLog.printNoPrefix("######## insert data mix with NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + 
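+            # this third NULL-only row lands beyond the current max ts, widening
+            # the time range csum has to scan without adding any numeric data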
tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + self.csum_current_query() + self.csum_error_query() + + + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + self.csum_current_query() + self.csum_error_query() + + def run(self): + import traceback + try: + # run in develop branch + self.csum_test_run() + pass + except Exception as e: + traceback.print_exc() + raise e + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/2-query/diff.py b/tests/system-test/2-query/diff.py index 03b3899dc659d79ca8ae0750710fe293b5f83a3b..0d8b0de3dca8d0db11eb98e9b04defff07df741c 100644 --- a/tests/system-test/2-query/diff.py +++ b/tests/system-test/2-query/diff.py @@ -15,59 +15,51 @@ class TDTestCase: self.perfix = 'dev' self.tables = 10 - def insertData(self): - print("==============step1") - tdSql.execute( - "create table if not exists st (ts timestamp, col int) tags(dev nchar(50))") - - for i in range(self.tables): - tdSql.execute("create table %s%d using st tags(%d)" % (self.perfix, i, i)) - rows = 15 + i - for j in range(rows): - tdSql.execute("insert into %s%d values(%d, %d)" %(self.perfix, i, self.ts + i * 20 * 10000 + j * 10000, j)) def run(self): tdSql.prepare() - tdSql.execute("create table ntb(ts timestamp,c1 int,c2 double,c3 float)") - tdSql.execute("insert into ntb values(now,1,1.0,10.5)(now+1s,10,-100.0,5.1)(now+10s,-1,15.1,5.0)") + tdSql.execute( + "create table ntb(ts timestamp,c1 int,c2 double,c3 float)") + tdSql.execute( + "insert into ntb values(now,1,1.0,10.5)(now+1s,10,-100.0,5.1)(now+10s,-1,15.1,5.0)") tdSql.query("select diff(c1,0) from ntb") tdSql.checkRows(2) - tdSql.checkData(0,0,9) - tdSql.checkData(1,0,-11) + tdSql.checkData(0, 0, 9) + tdSql.checkData(1, 0, -11) tdSql.query("select diff(c1,1) from ntb") tdSql.checkRows(2) - tdSql.checkData(0,0,9) - tdSql.checkData(1,0,None) - + tdSql.checkData(0, 0, 9) + tdSql.checkData(1, 0, None) + tdSql.query("select diff(c2,0) from ntb") tdSql.checkRows(2) - tdSql.checkData(0,0,-101) - tdSql.checkData(1,0,115.1) + tdSql.checkData(0, 0, -101) + tdSql.checkData(1, 0, 115.1) tdSql.query("select diff(c2,1) from ntb") tdSql.checkRows(2) - tdSql.checkData(0,0,None) - tdSql.checkData(1,0,115.1) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 115.1) tdSql.query("select diff(c3,0) from ntb") tdSql.checkRows(2) - tdSql.checkData(0,0,-5.4) - tdSql.checkData(1,0,-0.1) + tdSql.checkData(0, 0, -5.4) + tdSql.checkData(1, 0, -0.1) tdSql.query("select diff(c3,1) from ntb") tdSql.checkRows(2) - tdSql.checkData(0,0,None) - tdSql.checkData(1,0,None) - + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') tdSql.execute("create table stb_1 using stb tags('beijing')") - tdSql.execute("insert into stb_1 values(%d, 0, 0, 0, 0, 0.0, 0.0, False, ' ', ' ', 0, 0, 0, 0)" % (self.ts - 1)) - - # diff verifacation + tdSql.execute( + "insert into stb_1 values(%d, 0, 0, 0, 0, 0.0, 0.0, False, ' ', ' ', 0, 0, 0, 0)" % (self.ts - 1)) + + # diff verifacation 
tdSql.query("select diff(col1) from stb_1") tdSql.checkRows(0) - + tdSql.query("select diff(col2) from stb_1") tdSql.checkRows(0) @@ -87,38 +79,23 @@ class TDTestCase: tdSql.checkRows(0) for i in range(self.rowNum): - tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) - - # tdSql.error("select diff(ts) from stb") + tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + tdSql.error("select diff(ts) from stb") tdSql.error("select diff(ts) from stb_1") - # tdSql.error("select diff(col7) from stb") - - # tdSql.error("select diff(col8) from stb") + + # tdSql.error("select diff(col7) from stb") + + tdSql.error("select diff(col8) from stb") tdSql.error("select diff(col8) from stb_1") - # tdSql.error("select diff(col9) from stb") + tdSql.error("select diff(col9) from stb") tdSql.error("select diff(col9) from stb_1") tdSql.error("select diff(col11) from stb_1") tdSql.error("select diff(col12) from stb_1") tdSql.error("select diff(col13) from stb_1") tdSql.error("select diff(col14) from stb_1") - - tdSql.query("select ts,diff(col1),ts from stb_1") - tdSql.checkRows(11) - tdSql.checkData(0, 0, "2018-09-17 09:00:00.000") - tdSql.checkData(1, 0, "2018-09-17 09:00:00.000") - tdSql.checkData(1, 2, "2018-09-17 09:00:00.000") - tdSql.checkData(9, 0, "2018-09-17 09:00:00.009") - tdSql.checkData(9, 2, "2018-09-17 09:00:00.009") - - # tdSql.query("select ts,diff(col1),ts from stb group by tbname") - # tdSql.checkRows(10) - # tdSql.checkData(0, 0, "2018-09-17 09:00:00.000") - # tdSql.checkData(0, 1, "2018-09-17 09:00:00.000") - # tdSql.checkData(0, 3, "2018-09-17 09:00:00.000") - # tdSql.checkData(9, 0, "2018-09-17 09:00:00.009") - # tdSql.checkData(9, 1, "2018-09-17 09:00:00.009") - # tdSql.checkData(9, 3, "2018-09-17 09:00:00.009") + tdSql.error("select ts,diff(col1),ts from stb_1") tdSql.query("select diff(col1) from stb_1") tdSql.checkRows(10) @@ -137,10 +114,27 @@ class TDTestCase: tdSql.query("select diff(col6) from stb_1") tdSql.checkRows(10) - + + tdSql.execute('''create table stb1(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') + tdSql.execute("create table stb1_1 using stb tags('shanghai')") + + for i in range(self.rowNum): + tdSql.execute("insert into stb1_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + for i in range(self.rowNum): + tdSql.execute("insert into stb1_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts - i-1, i-1, i-1, i-1, i-1, -i - 0.1, -i - 0.1, -i % 2, i - 1, i - 1, i + 1, i + 1, i + 1, i + 1)) + tdSql.query("select diff(col1,0) from stb1_1") + tdSql.checkRows(19) + tdSql.query("select diff(col1,1) from stb1_1") + tdSql.checkRows(19) + tdSql.checkData(0,0,None) + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) + tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at 
end of file
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/elapsed.py b/tests/system-test/2-query/elapsed.py
new file mode 100644
index 0000000000000000000000000000000000000000..017090128d40f66eb7f395c75c41cafff2934a47
--- /dev/null
+++ b/tests/system-test/2-query/elapsed.py
@@ -0,0 +1,1604 @@
+###################################################################
+# Copyright (c) 2020 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+        self.ts = 1420041600000  # 2015-01-01 00:00:00, start time of the first record
+        self.num = 10
+
+    def caseDescription(self):
+
+        '''
+        case1 : [TD-11804] test case for the elapsed function :
+
+            this test case covers the aggregate function elapsed. elapsed can only be applied to the timestamp
+            primary key column (ts), and it takes two parameters, of which the first is required. The basic SQL is:
+
+            ===================================================================================================================================
+            SELECT ELAPSED(field_name[, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]];
+            ===================================================================================================================================
+
+            elapsed can act on both ordinary tables and super tables; note that the function is tied to the timeline.
+            If it acts on a super table, the query must group by tbname. The function also supports nested queries.
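+
+            for example, given the data prepared below (each table gets 10 rows whose ts values
+            are exactly 10 seconds apart), one would expect:
+                select elapsed(ts) from sub_table1_1;        -- 90000.0 (default unit: database precision, here ms)
+                select elapsed(ts, 10s) from sub_table1_1;   -- 9.0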
+ + The scenarios covered by the test cases are as follows: + + ==================================================================================================================================== + + case: select * from table|stable[group by tbname]|regular_table + + case:select elapsed(ts) from table|stable where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] order [by ts desc asc|desc]; + + case:select elapsed(ts) , elapsed(ts,unit_time1)*regular_num1 , elapsed(ts,unit_time1)+regular_num2 from table|stable where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] order [by ts desc asc|desc]; + + //mixup with all functions only once query (it's different with nest query) + case:select elapsed(ts), count(*), avg(col), twa(col), irate(col), sum(col), stddev(col), leastsquares(col, 1, 1),min(col), max(col), first(col), last(col), percentile(col, 20), apercentile(col, 30), last_row(col), spread(col)from table|stable where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] order [by ts desc asc|desc]; + + //mixup with ordinary col + case:select ts ,elapsed(ts)*10 ,col+5 from table|stable where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] order [by ts desc asc|desc]; + + //nest query + case:select elapsed(ts) from (select elapsed(ts), count(*), avg(col), twa(col), irate(col), sum(col), stddev(col), leastsquares(col, 1, 1),min(col), max(col), first(col), last(col), percentile(col, 20), apercentile(col, 30), last_row(col), spread(col)from table|stable where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] order [by ts desc asc|desc]) where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] order [by ts desc asc|desc]; + + //clause about filter condition + case:select elapsed(ts) from table|stable[group by tbname] where [ts|col|tag >|<|=|>=|<=|=|<>|!= value] | [between ... and ...] |[in] |[is null|not null] interval (unit_time) ; + case:select elapsed(ts) from table|stable[group by tbname] where clause1 and clause 2 and clause3 interval (unit_time) ; + + //JOIN query + case:select elapsed(ts) from TABLE1 as tb1 , TABLE2 as tb2 where join_condition [TABLE1 and TABLE2 can be stable|table|sub_table|empty_table] + + //UNION ALL query + case:select elapsed(ts) from TABLE1 union all select elapsed(ts) from TABLE2 [TABLE1 and TABLE2 can be stable|table|sub_table|empty_table] + + // Window aggregation + + case:select elapsed(ts) from t1 where clause session(ts, time_units) ; + case:select elapsed(ts) from t1 where clause state_window(regular_nums); + + // Continuous query + case:create table select elapsed(ts) ,avg(col) from (select elapsed(ts) ts_inter ,avg(col) col from stable|table interval (unit_time) [fill(LINEAR,NEXT,PREV,VALUE,NULL)][group by tbname]) interval (unit_time) [fill(LINEAR,NEXT,PREV,VALUE,NULL) sliding(unit_time_windows); + + ======================================================================================================================================== + + this test case notice successful execution and correctness of results. 
+ + ''' + return + + def prepare_data(self): + + tdLog.info (" ====================================== prepare data ==================================================") + + tdSql.execute('drop database if exists testdb ;') + tdSql.execute('create database testdb keep 36500;') + tdSql.execute('use testdb;') + + tdSql.execute('create stable stable_1(ts timestamp ,tscol timestamp, q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, q_float float ,\ + q_double double , bin_chars binary(20)) tags(loc nchar(20) ,ind int,tstag timestamp);') + tdSql.execute('create stable stable_2(ts timestamp ,tscol timestamp, q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, q_float float ,\ + q_double double, bin_chars binary(20) ) tags(loc nchar(20),ind int,tstag timestamp);') + # create empty stables + tdSql.execute('create stable stable_empty(ts timestamp ,tscol timestamp, q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, q_float float ,\ + q_double double, bin_chars binary(20) ) tags(loc nchar(20),ind int,tstag timestamp);') + tdSql.execute('create stable stable_sub_empty(ts timestamp ,tscol timestamp, q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, q_float float ,\ + q_double double, bin_chars binary(20) ) tags(loc nchar(20),ind int,tstag timestamp);') + + # create empty sub_talbes and regular tables + tdSql.execute('create table sub_empty_1 using stable_sub_empty tags("sub_empty_1",3,"2015-01-01 00:02:00")') + tdSql.execute('create table sub_empty_2 using stable_sub_empty tags("sub_empty_2",3,"2015-01-01 00:02:00")') + tdSql.execute('create table regular_empty (ts timestamp , tscol timestamp ,q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , bin_chars binary(20)) ;') + + tdSql.execute('create table sub_table1_1 using stable_1 tags("sub1_1",1,"2015-01-01 00:00:00")') + tdSql.execute('create table sub_table1_2 using stable_1 tags("sub1_2",2,"2015-01-01 00:01:00")') + tdSql.execute('create table sub_table1_3 using stable_1 tags("sub1_3",3,"2015-01-01 00:02:00")') + + tdSql.execute('create table sub_table2_1 using stable_2 tags("sub2_1",1,"2015-01-01 00:00:00")') + tdSql.execute('create table sub_table2_2 using stable_2 tags("sub2_2",2,"2015-01-01 00:01:00")') + tdSql.execute('create table sub_table2_3 using stable_2 tags("sub2_3",3,"2015-01-01 00:02:00")') + + tdSql.execute('create table regular_table_1 (ts timestamp , tscol timestamp ,q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double, bin_chars binary(20)) ;') + tdSql.execute('create table regular_table_2 (ts timestamp , tscol timestamp ,q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , bin_chars binary(20)) ;') + tdSql.execute('create table regular_table_3 (ts timestamp , tscol timestamp ,q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , bin_chars binary(20)) ;') + + tablenames = ["sub_table1_1","sub_table1_2","sub_table1_3","sub_table2_1","sub_table2_2","sub_table2_3","regular_table_1","regular_table_2","regular_table_3"] + + tdLog.info("insert into records ") + + for tablename in tablenames: + + for i in range(self.num): + sql= 'insert into %s values(%d, %d,%d, %d, %d, %d, %f, %f, "%s")' % (tablename,self.ts + i*10000, self.ts + i*10,2147483647-i, 9223372036854775807-i, 32767-i, 127-i, i, i,("bintest"+str(i))) + print(sql) + tdSql.execute(sql) + + 
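+        # data layout note: prepare_data gives every table self.num = 10 rows whose
+        # ts values are spaced exactly 10 seconds apart, so a whole-table
+        # elapsed(ts, 10s) is expected to return self.num - 1 == 9 time units
+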
tdLog.info("=============================================data prepared done!=========================") + + def abnormal_common_test(self): + + tdLog.info (" ====================================== elapsed illeagal params ==================================================") + + tablenames = ["sub_table1_1","sub_table1_2","sub_table1_3","sub_table2_1","sub_table2_2","sub_table2_3","regular_table_1","regular_table_2","regular_table_3"] + + abnormal_list = ["()","(NULL)","(*)","(abc)","( , )","(NULL,*)","( ,NULL)","(%)","(+)","(*,)","(*, /)","(ts,*)" "(ts,tbname*10)","(ts,tagname)", + "(ts,2d+3m-2s,NULL)","(ts+1d,10s)","(ts+10d,NULL)" ,"(ts,now -1m%1d)","(ts+10d)","(ts+10d,_c0)","(ts+10d,)","(ts,%)","(ts, , m)","(ts,abc)","(ts,/)","(ts,*)","(ts,1s,100)", + "(ts,1s,abc)","(ts,1s,_c0)","(ts,1s,*)","(ts,1s,NULL)","(ts,,_c0)","(ts,tbname,ts)","(ts,0,tbname)","('2021-11-18 00:00:10')","('2021-11-18 00:00:10', 1s)", + "('2021-11-18T00:00:10+0800', '1s')","('2021-11-18T00:00:10Z', '1s')","('2021-11-18T00:00:10+0800', 10000000d,)","('ts', ,2021-11-18T00:00:10+0800, )"] + + for tablename in tablenames: + for abnormal_param in abnormal_list: + + if tablename.startswith("stable"): + basic_sql= "select elapsed" + abnormal_param + " from " + tablename + " group by tbname ,ind order by tbname;" #stables + else: + basic_sql= "select elapsed" + abnormal_param + " from " + tablename + ";" # regular table + tdSql.error(basic_sql) + + def abnormal_use_test(self): + + tdLog.info (" ====================================== elapsed use abnormal ==================================================") + + sqls_list = ["select elapsed(ts) from regular_empty group by tbname,ind order by desc; ", + "select elapsed(ts) from regular_empty group by tbname,ind order by desc; ", + "select elapsed(ts) from regular_table_1 group by tbname,ind order by desc; ", + "select elapsed(ts) from sub_table1_1 group by tbname,ind order by desc; ", + "select elapsed(ts) from sub_table1_1 group by tbname,ind order by desc; ", + # "select elapsed(ts,10s) from stable_empty group by ts order by ts;", + "select elapsed(ts,10s) from stable_1 group by ind order by ts;", + "select elapsed(ts,10s) from stable_2 group by tstag order by ts;", + "select elapsed(ts,10s) from stable_1 group by tbname,tstag,tscol order by ts;", + "select elapsed(ts,10s),ts from stable_1 group by tbname ,ind order by ts;", + "select ts,elapsed(ts,10s),tscol*100 from stable_1 group by tbname ,ind order by ts;", + "select elapsed(ts) from stable_1 group by tstag order by ts;", + "select elapsed(ts) from sub_empty_1 group by tbname,ind ,tscol order by ts desc;", + "select tbname, tscol,elapsed(ts) from sub_table1_1 group by tbname ,ind order by ts desc;", + "select elapsed(tscol) from sub_table1_1 order by ts desc;", + "select elapsed(tstag) from sub_table1_1 order by ts desc;", + "select elapsed(ind) from sub_table1_1 order by ts desc;", + "select elapsed(tscol) from sub_empty_1 order by ts desc;", + "select elapsed(tstag) from sub_empty_1 order by ts desc;", + "select elapsed(ind) from sub_table1_1 order by ts desc;", + "select elapsed(ind,10s) from sub_table1_1 order by ts desc;", + "select elapsed(tscol,10s) from sub_table1_1 order by ts desc;", + "select elapsed(tstag,10s) from sub_table1_1 order by ts desc;", + "select elapsed(q_int,10s) from sub_table1_1 order by ts desc;", + "select elapsed(loc,10s) from sub_table1_1 order by ts desc;", + "select elapsed(q_bigint,10s) from sub_table1_1 order by ts desc;", + "select elapsed(bin_chars,10s) from sub_table1_1 order by ts 
desc;"] + for sql in sqls_list : + tdSql.error(sql) + + def query_filter(self): + + tdLog.info (" ====================================== elapsed query filter ==================================================") + + for i in range(self.num): + ts_start_time = self.ts + i*10000 + ts_col_start_time = self.ts + i*10 + ts_tag_time = "2015-01-01 00:01:00" + ts_end_time = self.ts + (self.num-1-i)*10000 + ts_col_end_time = self.ts + (self.num-1-i)*10 + + filter_sql = "select elapsed(ts,10s) from stable_1 where ts >= %d group by tbname " %(ts_start_time) + tdSql.query(filter_sql) + tdSql.checkRows(3) + tdSql.checkData(0,0,float(self.num -i-1)) + tdSql.checkData(1,0,float(self.num -i-1)) + tdSql.checkData(2,0,float(self.num -i-1)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts >= %d " %(ts_start_time) + tdSql.query(filter_sql) + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-1)) + + + filter_sql = "select elapsed(ts,10s) from stable_1 where ts >= %d and tscol >= %d and tstag='2015-01-01 00:01:00'group by tbname " %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-1)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts >= %d and tscol >= %d " %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-1)) + + filter_sql = "select elapsed(ts,10s) from stable_1 where ts >= %d and tscol > %d and tstag='2015-01-01 00:01:00' group by tbname" %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts >= %d and tscol > %d " %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from stable_1 where ts > %d and tscol > %d and tstag < '2015-01-01 00:01:00' group by tbname " %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts > %d and tscol > %d " %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from stable_1 where ts > %d and tscol <= %d and tstag < '2015-01-01 00:01:00' group by tbname" %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + tdSql.checkRows(0) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts > %d and tscol <= %d " %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + tdSql.checkRows(0) + + filter_sql = "select elapsed(ts,10s) from stable_1 where ts < %d and tscol <= %d and tstag < '2015-01-01 00:01:00' group by tbname" %(ts_end_time,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts < %d and tscol <= %d " %(ts_end_time,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = 
"select elapsed(ts,10s) from stable_1 where ts < %d and tscol <= %d group by tbname " %(ts_end_time,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(3) + tdSql.checkData(0,0,float(self.num - i - 2)) + tdSql.checkData(1,0,float(self.num - i - 2)) + tdSql.checkData(2,0,float(self.num - i - 2)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts < %d and tscol <= %d " %(ts_end_time,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num - i - 2)) + + filter_sql = "select elapsed(ts,10s) from stable_1 where ts = %d and tscol < %d group by tbname " %(ts_end_time,ts_col_end_time) + tdSql.query(filter_sql) + tdSql.checkRows(0) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts = %d and tscol < %d " %(ts_end_time,ts_col_end_time) + tdSql.query(filter_sql) + tdSql.checkRows(0) + + filter_sql = "select elapsed(ts,10s) from stable_1 where q_tinyint != %d and tscol < %d group by tbname " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(3) + tdSql.checkData(0,0,float(self.num -i-2)) + tdSql.checkData(1,0,float(self.num -i-2)) + tdSql.checkData(2,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where q_tinyint != %d and tscol < %d " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from stable_1 where q_tinyint != %d and tscol <= %d group by tbname " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num: + tdSql.checkRows(0) + else: + tdSql.checkRows(3) + tdSql.checkData(0,0,float(self.num - i - 1)) + tdSql.checkData(1,0,float(self.num - i - 1)) + tdSql.checkData(2,0,float(self.num - i - 1)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where q_tinyint != %d and tscol <= %d " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num - i - 1)) + + filter_sql = "select elapsed(ts,10s) from stable_1 where q_tinyint <> %d and tscol < %d group by tbname " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(3) + tdSql.checkData(0,0,float(self.num -i-2)) + tdSql.checkData(1,0,float(self.num -i-2)) + tdSql.checkData(2,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where q_tinyint <> %d and tscol < %d " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from stable_1 where q_tinyint <> %d and tscol <= %d group by tbname " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num: + tdSql.checkRows(0) + else: + tdSql.checkRows(3) + tdSql.checkData(0,0,float(self.num - i - 1)) + tdSql.checkData(1,0,float(self.num - i - 1)) + tdSql.checkData(2,0,float(self.num - i - 1)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where q_tinyint <> %d and tscol <= %d " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num - i - 1)) + + # filter between and + tdSql.query("select 
elapsed(ts,10s) from sub_table1_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and q_tinyint between 125 and 127 and tscol <= '2015-01-01 00:01:00.000' ") + tdSql.checkData(0,0,2) + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and \ + q_tinyint between 125 and 127 and tscol <= '2015-01-01 00:01:00.000' group by tbname ") + tdSql.checkData(0,0,2) + tdSql.checkData(1,0,2) + tdSql.checkData(2,0,2) + + # filter in and or + tdSql.query("select elapsed(ts,10s) from sub_table1_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and q_tinyint between 125 and 127 and tscol <= '2015-01-01 00:01:00.000' ") + tdSql.checkData(0,0,2) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and q_tinyint between 125 and 127 and tscol <= '2015-01-01 00:01:00.000' group by tbname ") + tdSql.checkData(0,0,2) + tdSql.checkData(1,0,2) + tdSql.checkData(2,0,2) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and q_tinyint in (125,126,127) and tscol <= '2015-01-01 00:01:00.000' group by tbname ") + tdSql.checkData(0,0,2) + tdSql.checkData(1,0,2) + tdSql.checkData(2,0,2) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars in ('bintest0','bintest1') and tscol <= '2015-01-01 00:01:00.000' group by tbname ") + tdSql.checkData(0,0,1) + tdSql.checkData(1,0,1) + tdSql.checkData(2,0,1) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars in ('bintest0','bintest1') and tscol <= '2015-01-01 00:01:00.000' group by tbname ") + tdSql.checkData(0,0,1) + tdSql.checkData(1,0,1) + tdSql.checkData(2,0,1) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars like 'bintest_' and tscol <= '2015-01-01 00:01:00.000' group by tbname ") + tdSql.checkData(0,0,6) + tdSql.checkData(1,0,6) + tdSql.checkData(2,0,6) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars like 'bintest_' and tscol <= '2015-01-01 00:01:00.000' group by tbname ") + tdSql.checkData(0,0,6) + tdSql.checkData(1,0,6) + tdSql.checkData(2,0,6) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars is not null and tscol <= '2015-01-01 00:01:00.000' group by tbname; ") + tdSql.checkData(0,0,6) + tdSql.checkData(1,0,6) + tdSql.checkData(2,0,6) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars is null and tscol <= '2015-01-01 00:01:00.000' group by tbname; ") + tdSql.checkRows(0) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars match '^b' and tscol <= '2015-01-01 00:01:00.000' group by tbname; ") + tdSql.checkRows(3) + tdSql.checkData(0,0,6) + tdSql.checkData(1,0,6) + tdSql.checkData(2,0,6) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars nmatch '^a' and tscol <= '2015-01-01 00:01:00.000' group by 
tbname; ") + tdSql.checkRows(3) + tdSql.checkData(0,0,6) + tdSql.checkData(1,0,6) + tdSql.checkData(2,0,6) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars ='bintest1' or bin_chars ='bintest2' and tscol <= '2015-01-01 00:01:00.000' group by tbname; ") + tdSql.checkRows(3) + tdSql.query("select elapsed(ts,10s) from stable_1 where (ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000') or (ts between '2015-01-01 00:01:00.000' and '2015-01-01 00:02:00.000') group by tbname; ") + tdSql.checkRows(3) + tdSql.checkData(0,0,9) + tdSql.checkData(1,0,9) + tdSql.checkData(2,0,9) + + def query_interval(self): + + tdLog.info (" ====================================== elapsed interval sliding fill ==================================================") + + # empty interval + tdSql.query("select max(q_int)*10 from stable_empty where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev);") + tdSql.checkRows(0) + tdSql.query("select max(q_int)*10 from sub_empty_2 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev);") + tdSql.checkRows(0) + + tdSql.query("select elapsed(ts,10s)*10 from stable_empty where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev) group by tbname;") + tdSql.checkRows(0) + tdSql.query("select elapsed(ts,10s)*10 from sub_empty_2 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev);") + tdSql.checkRows(0) + + for i in range(self.num): + ts_start_time = self.ts + i*10000 + ts_col_start_time = self.ts + i*10 + ts_tag_time = "2015-01-01 00:01:00" + ts_end_time = self.ts + (self.num-1-i)*10000 + ts_col_end_time = self.ts + (self.num-1-i)*10 + + + # only interval + interval_sql = "select elapsed(ts,10s) from stable_1 where ts <=%d interval(10s) group by tbname " %(ts_start_time) + tdSql.query(interval_sql) + tdSql.checkRows(3*(i+1)) + + interval_sql = "select elapsed(ts,10s) from sub_table1_1 where ts <=%d interval(10s) " %(ts_start_time) + tdSql.query(interval_sql) + tdSql.checkRows(i+1) + for x in range(i+1): + if x == i: + tdSql.checkData(x,1,0) + else : + tdSql.checkData(x,1,1) + + # interval and fill , fill_type = ["NULL","value,100","prev","next","linear"] + + # interval (10s) and time range is outer records + + tdSql.query("select elapsed(ts,10s)*10 from stable_empty where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev) group by tbname;") + tdSql.checkRows(0) + + tdSql.query("select elapsed(ts,10s)*10 from sub_empty_2 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev);") + tdSql.checkRows(0) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev) group by tbname;") + tdSql.checkRows(180) + tdSql.checkData(0,1,10) + tdSql.checkData(9,1,0) + tdSql.checkData(59,1,0) + tdSql.checkData(60,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(next) group by tbname;") + tdSql.checkRows(180) + tdSql.checkData(0,1,10) + tdSql.checkData(9,1,0) + tdSql.checkData(10,1,None) + tdSql.checkData(59,1,None) + tdSql.checkData(60,1,10) + tdSql.checkData(61,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and 
ts <'2015-01-01 00:10:00.000' interval(10s) fill(linear) group by tbname;") + tdSql.checkRows(180) + tdSql.checkData(0,1,10) + tdSql.checkData(9,1,0) + tdSql.checkData(10,1,None) + tdSql.checkData(59,1,None) + tdSql.checkData(60,1,10) + tdSql.checkData(61,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(NULL) group by tbname;") + tdSql.checkRows(180) + tdSql.checkData(0,1,10) + tdSql.checkData(9,1,0) + tdSql.checkData(10,1,None) + tdSql.checkData(59,1,None) + tdSql.checkData(60,1,10) + tdSql.checkData(61,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(value ,2) group by tbname;") + tdSql.checkRows(180) + tdSql.checkData(0,1,10) + tdSql.checkData(9,1,0) + tdSql.checkData(10,1,2) + tdSql.checkData(59,1,2) + tdSql.checkData(60,1,10) + tdSql.checkData(61,1,10) + + # interval (20s) and time range is outer records + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(20s) fill(prev) group by tbname,ind ;") + tdSql.checkRows(90) + tdSql.checkData(0,1,20) + tdSql.checkData(4,1,10) + tdSql.checkData(5,1,10) + tdSql.checkData(29,1,10) + tdSql.checkData(30,1,20) + tdSql.checkData(31,1,20) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(20s) fill(next) group by tbname,ind ;") + tdSql.checkRows(90) + tdSql.checkData(0,1,20) + tdSql.checkData(4,1,10) + tdSql.checkData(5,1,None) + tdSql.checkData(29,1,None) + tdSql.checkData(30,1,20) + tdSql.checkData(31,1,20) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(20s) fill(linear) group by tbname,ind ;") + tdSql.checkRows(90) + tdSql.checkData(0,1,20) + tdSql.checkData(4,1,10) + tdSql.checkData(5,1,None) + tdSql.checkData(29,1,None) + tdSql.checkData(30,1,20) + tdSql.checkData(31,1,20) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(20s) fill(NULL) group by tbname,ind ;") + tdSql.checkRows(90) + tdSql.checkData(0,1,20) + tdSql.checkData(4,1,10) + tdSql.checkData(5,1,None) + tdSql.checkData(29,1,None) + tdSql.checkData(30,1,20) + tdSql.checkData(31,1,20) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(20s) fill(value ,2) group by tbname,ind ;") + tdSql.checkRows(90) + tdSql.checkData(0,1,20) + tdSql.checkData(4,1,10) + tdSql.checkData(5,1,2) + tdSql.checkData(29,1,2) + tdSql.checkData(30,1,20) + tdSql.checkData(31,1,20) + + # interval (20s) and time range is in records + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) fill(prev) group by tbname,ind ;") + tdSql.checkRows(9) + tdSql.checkData(0,1,20) + tdSql.checkData(2,1,10) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + tdSql.checkData(7,1,20) + tdSql.checkData(8,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) fill(next) group by tbname,ind ;") + tdSql.checkRows(9) + tdSql.checkData(0,1,20) + tdSql.checkData(2,1,10) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + 
tdSql.checkData(7,1,20) + tdSql.checkData(8,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) fill(linear) group by tbname,ind ;") + tdSql.checkRows(9) + tdSql.checkData(0,1,20) + tdSql.checkData(2,1,10) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + tdSql.checkData(7,1,20) + tdSql.checkData(8,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) fill(NULL) group by tbname,ind ;") + tdSql.checkRows(9) + tdSql.checkData(0,1,20) + tdSql.checkData(2,1,10) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + tdSql.checkData(7,1,20) + tdSql.checkData(8,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) fill(value ,2 ) group by tbname,ind ;") + tdSql.checkRows(9) + tdSql.checkData(0,1,20) + tdSql.checkData(2,1,10) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + tdSql.checkData(7,1,20) + tdSql.checkData(8,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) group by tbname,ind ;") + tdSql.checkRows(9) + tdSql.checkData(0,1,20) + tdSql.checkData(2,1,10) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + tdSql.checkData(7,1,20) + tdSql.checkData(8,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2014-12-31 23:59:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) fill(NULL) group by tbname,ind ;") + tdSql.checkRows(18) + tdSql.checkData(0,1,None) + tdSql.checkData(2,1,None) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + tdSql.checkData(7,1,None) + tdSql.checkData(8,1,None) + tdSql.checkData(9,1,20) + + # interval sliding + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2014-12-31 23:59:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) sliding(20s) fill(NULL) group by tbname,ind ;") + tdSql.checkRows(18) + tdSql.checkData(0,1,None) + tdSql.checkData(2,1,None) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + tdSql.checkData(7,1,None) + tdSql.checkData(8,1,None) + tdSql.checkData(9,1,20) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2014-12-31 23:59:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) sliding(10s) fill(NULL) group by tbname,ind ;") + tdSql.checkRows(39) + tdSql.checkData(0,1,None) + tdSql.checkData(2,1,None) + tdSql.checkData(6,1,10) + tdSql.checkData(7,1,20) + tdSql.checkData(12,1,0) + tdSql.checkData(13,1,None) + tdSql.checkData(15,1,None) + tdSql.checkData(19,1,10) + tdSql.checkData(20,1,20) + tdSql.checkData(25,1,0) + + def query_mix_common(self): + + tdLog.info (" ======================================elapsed mixup with common col, it will not support =======================================") + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and ind =1 group by tbname; ") + tdSql.checkRows(1) + tdSql.checkData(0,0,6) + + tdSql.query("select elapsed(ts,10s) from sub_table1_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' ; ") + tdSql.checkRows(1) + tdSql.checkData(0,0,6) + + tdSql.error("select ts,elapsed(ts,10s) from sub_empty_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' ; ") + tdSql.error("select ts,elapsed(ts,10s) from stable_empty where ts between 
'2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' group by tbname; ") + + tdSql.error("select ts,elapsed(ts,10s) from sub_table1_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' ; ") + tdSql.error("select ts,elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' group by tbname; ") + + tdSql.error("select q_int,elapsed(ts,10s) from sub_table1_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' ; ") + tdSql.error("select q_int,elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' group by tbname; ") + + tdSql.error("select ts,q_int,elapsed(ts,10s) from sub_table1_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' ; ") + tdSql.error("select ts,q_int,elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' group by tbname; ") + + def query_mix_Aggregate(self): + + tdLog.info (" ====================================== elapsed mixup with aggregate ==================================================") + + tdSql.query("select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) from sub_table1_1 ; ") + + data = tdSql.getResult("select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) from sub_table1_1 ; ") + + querys = ["count(*)","avg(q_int)", "sum(q_double)","stddev(q_float)","LEASTSQUARES(q_int,0,1)", "elapsed(ts,10s)"] + + for index , query in enumerate(querys): + sql = "select %s from sub_table1_1 " %(query) + tdSql.query(sql) + tdSql.checkData(0,0,data[0][index]) + + tdSql.query("select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) from stable_1 group by tbname; ") + + # Arithmetic with elapsed for common table + + operators = ["+" ,"-" , "*" ,"/" ,"%"] + querys_oper = ["count(*)","avg(q_int)", "sum(q_double)","stddev(q_float)", "elapsed(ts,10s)"] + + for operator in operators: + + query_datas=[] + + sql_common= "select " + + for index , query in enumerate(querys_oper): + + query_data = tdSql.getResult("select %s from sub_table1_1;"%query) + + query_datas.append(query_data[0][0]) + sql_common += " %s %s " %(query,operator) + sql_common=sql_common[:-2] + " from sub_table1_1;" + + tdSql.query(sql_common) + results= query_datas[0] + if operator == "+": + for data in query_datas[1:]: + results += data + tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "-": + for data in query_datas[1:]: + results -= data + tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "*": + for data in query_datas[1:]: + results *= data + tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "/": + for data in query_datas[1:]: + results /= data + tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "%": + for data in query_datas[1:]: + results %= data + tdSql.checkData(0,0,results) + + + # Arithmetic with elapsed for super table + + operators = ["+" ,"-" , "*" ,"/" ,"%"] + querys_oper = ["count(*)","avg(q_int)", "sum(q_double)","stddev(q_float)", "elapsed(ts,10s)"] + + for operator in operators: + + query_datas=[] + + sql_common= "select " + + for index , query in enumerate(querys_oper): + + query_data = tdSql.getResult("select %s from stable_1 group by tbname;"%query) + + query_datas.append(query_data[0][0]) + sql_common += " %s %s " %(query,operator) + sql_common=sql_common[:-2] + " from 
stable_1 group by tbname;" + + tdSql.query(sql_common) + results= query_datas[0] + if operator == "+": + for data in query_datas[1:]: + results += data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + + results= query_datas[0] + if operator == "-": + for data in query_datas[1:]: + results -= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + results= query_datas[0] + if operator == "*": + for data in query_datas[1:]: + results *= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + results= query_datas[0] + if operator == "/": + for data in query_datas[1:]: + results /= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + results= query_datas[0] + if operator == "%": + for data in query_datas[1:]: + results %= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + def query_mix_select(self): + + tdLog.info (" ====================================== elapsed mixup with select function =================================================") + + querys = ["max(q_int)","min(q_int)" , "first(q_tinyint)", "first(*)","last(q_int)","last(*)","PERCENTILE(q_int,10)","APERCENTILE(q_int,10)","elapsed(ts,10s)"] + + + querys_mix = ["max(q_int)","min(q_int)" , "first(q_tinyint)", "first(q_int)","last(q_int)","PERCENTILE(q_int,10)","APERCENTILE(q_int,10)","elapsed(ts,10s)"] + + tdSql.query("select max(q_int),min(q_int) , first(q_tinyint), first(q_int),last(q_int),PERCENTILE(q_int,10),APERCENTILE(q_int,10) ,elapsed(ts,10s) from sub_table1_1 ; ") + + data = tdSql.getResult("select max(q_int),min(q_int) , first(q_tinyint), first(q_int),last(q_int),PERCENTILE(q_int,10),APERCENTILE(q_int,10) ,elapsed(ts,10s) from sub_table1_1 ; ") + + for index , query in enumerate(querys_mix): + sql = "select %s from sub_table1_1 " %(query) + tdSql.query(sql) + tdSql.checkData(0,0,data[0][index]) + + tdSql.query("select max(q_int),min(q_int) , first(q_tinyint), first(q_int),last(q_int),APERCENTILE(q_int,10) ,elapsed(ts,10s) from stable_1 group by tbname ; ") + + data = tdSql.getResult("select max(q_int),min(q_int) , first(q_tinyint), first(q_int),last(q_int),APERCENTILE(q_int,10) ,elapsed(ts,10s) from stable_1 group by tbname ; ") + + querys_mix = ["max(q_int)","min(q_int)" , "first(q_tinyint)", "first(q_int)","last(q_int)","APERCENTILE(q_int,10)","elapsed(ts,10s)"] + + for index , query in enumerate(querys_mix): + sql = "select %s from stable_1 group by tbname " %(query) + tdSql.query(sql) + tdSql.checkData(0,0,data[0][index]) + tdSql.checkData(1,0,data[0][index]) + tdSql.checkData(2,0,data[0][index]) + + operators = ["+" ,"-" , "*" ,"/" ,"%"] + querys_oper = querys_mix + + for operator in operators: + + query_datas=[] + + sql_common= "select " + + for index , query in enumerate(querys_oper): + + query_data = tdSql.getResult("select %s from sub_table1_1;"%query) + + query_datas.append(query_data[0][0]) + sql_common += " %s %s " %(query,operator) + sql_common=sql_common[:-2] + " from sub_table1_1;" + + tdSql.query(sql_common) + results= query_datas[0] + if operator == "+": + for data in query_datas[1:]: + results += data + tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "-": + for data in query_datas[1:]: + results -= data + tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "*": + for data in query_datas[1:]: + results *= data + 
tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "/": + for data in query_datas[1:]: + results /= data + tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "%": + for data in query_datas[1:]: + results %= data + tdSql.checkData(0,0,results) + + + # Arithmetic with elapsed for super table + + operators = ["+" ,"-" , "*" ,"/" ,"%"] + querys_oper = querys_mix + + for operator in operators: + + query_datas=[] + + sql_common= "select " + + for index , query in enumerate(querys_oper): + + query_data = tdSql.getResult("select %s from stable_1 group by tbname;"%query) + + query_datas.append(query_data[0][0]) + sql_common += " %s %s " %(query,operator) + sql_common=sql_common[:-2] + " from stable_1 group by tbname;" + + tdSql.query(sql_common) + results= query_datas[0] + if operator == "+": + for data in query_datas[1:]: + results += data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + + results= query_datas[0] + if operator == "-": + for data in query_datas[1:]: + results -= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + results= query_datas[0] + if operator == "*": + for data in query_datas[1:]: + results *= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + results= query_datas[0] + if operator == "/": + for data in query_datas[1:]: + results /= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + results= query_datas[0] + if operator == "%": + for data in query_datas[1:]: + results %= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + def query_mix_compute(self): + + tdLog.info (" ====================================== elapsed mixup with compute function =================================================") + + querys = ["diff(q_int)","DERIVATIVE(q_int,1s,1)","spread(ts)","spread(q_tinyint)","ceil(q_float)","floor(q_float)","round(q_float)"] + + for index , query in enumerate(querys): + + sql1 = "select elapsed(ts,10s),%s from sub_table1_1 " %(query) + sql2 = "select elapsed(ts,10s),%s from stable_1 group by tbname" %(query) + if query in ["diff(q_int)","DERIVATIVE(q_int,1s,1)","ceil(q_float)","floor(q_float)","round(q_float)"]: + tdSql.error(sql1) + tdSql.error(sql2) + continue + tdSql.query(sql1) + tdSql.query(sql2) + + # only support mixup with spread + + sql = "select spread(ts)*10,spread(q_tinyint)-10,elapsed(ts,10s) from sub_table1_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ;" + tdSql.execute(sql) + + data = tdSql.getResult(sql) + + sql = "select spread(ts)*10,spread(q_tinyint)-10,elapsed(ts,10s) from stable_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ;" + tdSql.execute(sql) + + querys_mix = ["spread(ts)","spread(q_tinyint)-10","elapsed(ts,10s)"] + + for index , query in enumerate(querys_mix): + sql = "select %s from sub_table1_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ; " %(query) + tdSql.query(sql) + + operators = ["+" ,"-" , "*" ,"/" ,"%"] + querys_oper = querys_mix + + for operator in operators: + + sql_common= "select " + + for index , query in enumerate(querys_oper): + + sql_common += " %s %s " %(query,operator) + sql_common=sql_common[:-2] + " from stable_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ;" + + tdSql.query(sql_common) + + for index , query in 
enumerate(querys_mix): + sql = "select %s from stable_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ; " %(query) + tdSql.query(sql) + + operators = ["+" ,"-" , "*" ,"/" ,"%"] + querys_oper = querys_mix + + for operator in operators: + + sql_common= "select " + + for index , query in enumerate(querys_oper): + + sql_common += " %s %s " %(query,operator) + sql_common=sql_common[:-2] + " from stable_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ;" + + tdSql.query(sql_common) + + def query_mix_arithmetic(self): + + tdLog.info (" ====================================== elapsed mixup with arithmetic =================================================") + + tdSql.execute("select elapsed(ts,10s)+1 ,elapsed(ts,10s)-2,elapsed(ts,10s)*3,elapsed(ts,10s)/4,elapsed(ts,10s)%5 from sub_table1_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ; ") + tdSql.execute("select elapsed(ts,10s)+1 ,elapsed(ts,10s)-2,elapsed(ts,10s)*3,elapsed(ts,10s)/4,elapsed(ts,10s)%5 from stable_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ; ") + + # queries = ["elapsed(ts,10s)+1" ,"elapsed(ts,10s)-2","elapsed(ts,10s)*3","elapsed(ts,10s)/4","elapsed(ts,10s)%5" ] + + # for index ,query in enumerate(queries): + # sql = "select %s from sub_table1_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev) ;" % (query) + # data = tdSql.getResult(sql) + # tdSql.query("select elapsed(ts,10s)+1 ,elapsed(ts,10s)-2,elapsed(ts,10s)*3,elapsed(ts,10s)/4,elapsed(ts,10s)%5 from sub_table1_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev) ; ") + # tdSql.checkData(0,index+1,data[0][1]) + + def query_with_join(self): + + tdLog.info (" ====================================== elapsed mixup with join =================================================") + + tdSql.error("select elapsed(ts,10s) from stable_empty TABLE1 , stable_empty TABLE2 where TABLE1.ts =TABLE2.ts; ") + tdSql.error("select elapsed(ts,10s) from stable_empty TABLE1 , stable_empty TABLE2 where TABLE1.ts =TABLE2.ts group by tbname; ") + + tdSql.execute("select elapsed(ts,10s) from sub_empty_1 TABLE1 , sub_empty_2 TABLE2 where TABLE1.ts =TABLE2.ts; ") + tdSql.error("select elapsed(ts,10s) from stable_1 TABLE1 , stable_2 TABLE2 where TABLE1.ts =TABLE2.ts and TABLE1.ind =TABLE2.ind; ") + tdSql.error("select elapsed(ts,10s) from stable_1 TABLE1 , stable_2 TABLE2 where TABLE1.ts =TABLE2.ts and TABLE1.ind =TABLE2.ind group by tbname,ind; ") # join not support group by + + tdSql.error("select elapsed(ts,10s) from sub_empty_1 TABLE1 , stable_2 TABLE2 where TABLE1.ts =TABLE2.ts and TABLE1.ind =TABLE2.ind ; ") + tdSql.execute("select elapsed(ts,10s) from sub_empty_1 TABLE1 , sub_empty_2 TABLE2 where TABLE1.ts =TABLE2.ts ; ") + + tdSql.query("select elapsed(ts,10s) from sub_table1_1 TABLE1 , sub_table1_2 TABLE2 where TABLE1.ts =TABLE2.ts ; ") + tdSql.checkData(0,0,9) + + tdSql.query("select elapsed(ts,10s) from sub_empty_1 TABLE1 , sub_table1_2 TABLE2 where TABLE1.ts =TABLE2.ts ; ") + tdSql.checkRows(0) + + tdSql.query("select elapsed(ts,10s) from sub_empty_1 TABLE1 , regular_empty TABLE2 where TABLE1.ts =TABLE2.ts ; ") + tdSql.checkRows(0) + + tdSql.query("select elapsed(ts,10s) from sub_empty_1 TABLE1 , regular_table_1 TABLE2 where TABLE1.ts =TABLE2.ts ; ") + tdSql.checkRows(0) + + tdSql.query("select elapsed(ts,10s) from sub_table1_3 TABLE1 , regular_table_1 TABLE2 where TABLE1.ts =TABLE2.ts 
; ") + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + tdSql.query("select elapsed(ts,10s) from regular_table_1 ; ") + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + def query_with_union(self): + + tdLog.info (" ====================================== elapsed mixup with union all =================================================") + + # union all with empty + + tdSql.query("select elapsed(ts,10s) from regular_table_1 union all select elapsed(ts,10s) from regular_table_2;") + + tdSql.query("select elapsed(ts,10s) from regular_table_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev) union all \ + select elapsed(ts,10s) from regular_table_2 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev);") + tdSql.checkRows(1200) + tdSql.checkData(0,1,0.1) + tdSql.checkData(500,1,0) + + tdSql.query("select elapsed(ts,10s) from sub_empty_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev) union all \ + select elapsed(ts,10s) from regular_table_2 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev);") + tdSql.checkRows(600) + tdSql.checkData(0,1,0.1) + tdSql.checkData(500,0,0) + + tdSql.query('select elapsed(ts,10s) from sub_empty_1 union all select elapsed(ts,10s) from sub_empty_2;') + tdSql.checkRows(0) + + tdSql.query('select elapsed(ts,10s) from regular_table_1 union all select elapsed(ts,10s) from sub_empty_1;') + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + tdSql.query('select elapsed(ts,10s) from sub_empty_1 union all select elapsed(ts,10s) from regular_table_1;') + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + tdSql.query('select elapsed(ts,10s) from sub_empty_1 union all select elapsed(ts,10s) from sub_table1_1;') + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + tdSql.query('select elapsed(ts,10s) from sub_table1_1 union all select elapsed(ts,10s) from sub_empty_1;') + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + tdSql.query('select elapsed(ts,10s) from sub_empty_1 union all select elapsed(ts,10s) from regular_table_1;') + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + tdSql.error('select elapsed(ts,10s) from sub_empty_1 union all select elapsed(ts,10s) from stable_sub_empty group by tbname;') + + tdSql.error('select elapsed(ts,10s) from regular_table_1 union all select elapsed(ts,10s) from stable_sub_empty group by tbname;') + + tdSql.query('select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(1s) fill(prev) union all select elapsed(ts,10s) from sub_empty_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(1s) fill(prev);') + tdSql.checkRows(0) + + tdSql.error('select elapsed(ts,10s) from sub_empty_1 union all select elapsed(ts,10s) from stable_empty group by tbname;') + + tdSql.error('select elapsed(ts,10s) from sub_empty_1 interval(1s) union all select elapsed(ts,10s) from stable_empty interval(1s) group by tbname;') + + # tdSql.error('select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(1s) fill(prev) union all select elapsed(ts,10s) from stable_empty where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(1s) fill(prev) group by tbname;') + + tdSql.query("select elapsed(ts,10s) from stable_empty group by tbname union all select elapsed(ts,10s) from stable_empty group by tbname ;") + tdSql.checkRows(0) + + # 
case : TD-12229 + tdSql.query("select elapsed(ts,10s) from stable_empty group by tbname union all select elapsed(ts,10s) from stable_1 group by tbname ;") + tdSql.checkRows(3) + + tdSql.query("select elapsed(ts,10s) from stable_1 group by tbname union all select elapsed(ts,10s) from stable_1 group by tbname ;") + tdSql.checkRows(6) + tdSql.checkData(0,0,9) + tdSql.checkData(5,0,9) + + tdSql.query("select elapsed(ts,10s) from stable_1 group by tbname union all select elapsed(ts,10s) from stable_2 group by tbname ;") + tdSql.checkRows(6) + tdSql.checkData(0,0,9) + tdSql.checkData(5,0,9) + + tdSql.query('select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname union all\ + select elapsed(ts,10s) from stable_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname ;') + tdSql.checkRows(360) + tdSql.checkData(0,1,1) + tdSql.checkData(50,1,0) + + #case : TD-12229 + tdSql.query('select elapsed(ts,10s) from stable_empty group by tbname union all select elapsed(ts,10s) from stable_2 group by tbname ;') + tdSql.checkRows(3) + + tdSql.query('select elapsed(ts,10s) from stable_1 group by tbname union all select elapsed(ts,10s) from stable_empty group by tbname ;') + tdSql.checkRows(3) + + + tdSql.query('select elapsed(ts,10s) from stable_empty where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname union all\ + select elapsed(ts,10s) from stable_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname ;') + tdSql.checkRows(180) + + tdSql.query('select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname union all\ + select elapsed(ts,10s) from stable_empty where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname ;') + tdSql.checkRows(180) + + # union all with sub table and regular table + + # sub_table with sub_table + + tdSql.query('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from sub_table2_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(120) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(120) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(120) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from regular_table_1 where ts>="2015-01-01 00:00:00.000" and ts < 
"2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(120) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from regular_table_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(120) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from regular_table_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(120) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from regular_table_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(60) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(60) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + # stable with stable + + tdSql.query('select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname union all\ + select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname;') + tdSql.checkRows(360) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_2 interval(10s) union all select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev);') + tdSql.checkRows(10) + tdSql.checkData(0,1,1) + tdSql.checkData(9,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_2 interval(10s) union all select elapsed(ts,10s) from regular_table_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(70) + tdSql.checkData(0,1,1) + tdSql.checkData(9,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_2 interval(10s) order by ts desc union all select elapsed(ts,10s) from regular_table_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) order by ts asc;') + tdSql.checkRows(70) + tdSql.checkData(0,1,0) + tdSql.checkData(1,1,1) + tdSql.checkData(9,1,1) + + tdSql.query('select elapsed(ts,10s) from stable_1 group by tbname, ind order by ts desc union all select elapsed(ts,10s) from stable_2 group by tbname, ind order by ts asc ;') + tdSql.checkRows(6) + tdSql.checkData(0,0,9) + + tdSql.query('select elapsed(ts,10s) from stable_1 group by tbname, ind order by ts desc union all select elapsed(ts,10s) from stable_1 group by tbname, ind order by ts asc ;') + tdSql.checkRows(6) + tdSql.checkData(0,0,9) + + tdSql.query('select elapsed(ts,10s) from 
stable_1 interval(10s) group by tbname,ind order by ts desc union all select elapsed(ts,10s) from stable_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname,ind order by ts asc ;') + tdSql.checkRows(210) + tdSql.checkData(0,1,0) + tdSql.checkData(1,1,1) + tdSql.checkData(9,1,1) + + tdSql.query('select elapsed(ts,10s) from stable_2 interval(10s) group by tbname,ind order by ts desc union all select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname,ind order by ts asc ;') + tdSql.checkRows(210) + tdSql.checkData(0,1,0) + tdSql.checkData(1,1,1) + tdSql.checkData(9,1,1) + + tdSql.query('select elapsed(ts,10s) from stable_1 interval(10s) group by tbname,ind order by ts desc union all select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname,ind order by ts asc ;') + tdSql.checkRows(210) + tdSql.checkData(0,1,0) + tdSql.checkData(1,1,1) + tdSql.checkData(9,1,1) + + def query_nest(self): + + tdLog.info (" ====================================== elapsed query for nest =================================================") + + # ===============================================outer nest============================================ + + # regular table + + # ts can't be used at outer query + + tdSql.query("select elapsed(ts,10s) from (select ts from regular_table_1 );") + + # case : TD-12164 + + tdSql.error("select elapsed(ts,10s) from (select qint ts from regular_table_1 );") + tdSql.error("select elapsed(tbname ,10s) from (select qint tbname from regular_table_1 );") + tdSql.error("select elapsed(tsc ,1s) from (select q_int tsc from regular_table_1) ;") + tdSql.error("select elapsed(tsv ,1s) from (select elapsed(ts,1s) tsv from regular_table_1);") + tdSql.error("select elapsed(ts ,1s) from (select elapsed(ts,1s) ts from regular_table_1);") + # # bug fix + # tdSql.error("select elapsed(tsc ,1s) from (select tscol tsc from regular_table_1) ;") + + # case TD-12276 + # tdSql.error("select elapsed(ts,10s) from (select ts,tbname from regular_table_1 order by ts asc );") + + # tdSql.error("select elapsed(ts,10s) from (select ts,tbname from regular_table_1 order by ts desc );") + + # tdSql.error("select elapsed(ts,10s) from (select ts ,max(q_int),tbname from regular_table_1 order by ts ) interval(1s);") + + # tdSql.error("select elapsed(ts,10s) from (select ts ,q_int,tbname from regular_table_1 order by ts ) interval(1s);") + + # sub table + + tdSql.query("select elapsed(ts,10s) from (select ts from sub_table1_1 );") + + # tdSql.error("select elapsed(ts,10s) from (select ts ,max(q_int),tbname from sub_table1_1 order by ts ) interval(1s);") + + # tdSql.error("select elapsed(ts,10s) from (select ts ,q_int,tbname from sub_table1_1 order by ts ) interval(1s);") + + tdSql.query("select elapsed(ts,10s) from (select ts ,tbname,top(q_int,3) from sub_table1_1 ) interval(10s);") + + tdSql.query("select elapsed(ts,10s) from (select ts ,tbname,bottom(q_int,3) from sub_table1_1 ) interval(10s);") + + tdSql.query("select elapsed(ts,10s) from (select ts ,tbname from sub_table1_1 ) interval(10s);") + + tdSql.query("select elapsed(ts,10s) from (select ts ,tbname from sub_table1_1 ) interval(10s);") + + # tdSql.error("select elapsed(ts,10s) from (select ts ,count(*),tbname from sub_table1_1 order by ts ) interval(1s);") + + querys = ["count(*)","avg(q_int)", 
"sum(q_double)","stddev(q_float)","LEASTSQUARES(q_int,0,1)","elapsed(ts,10s)"] + + for query in querys: + sql1 = "select elapsed(ts,10s) from (select %s from regular_table_1 order by ts ) interval(1s); " % query + sql2 = "select elapsed(ts,10s) from (select ts , tbname ,%s from regular_table_1 order by ts ) interval(1s); " % query + sql3 = "select elapsed(ts,10s) from (select ts , tbname ,%s from stable_1 group by tbname, ind order by ts ) interval(1s); " % query + sql4 = "select elapsed(ts,10s) from (select %s from sub_table2_1 order by ts ) interval(1s); " % query + sql5 = "select elapsed(ts,10s) from (select ts , tbname ,%s from sub_table2_1 order by ts ) interval(1s); " % query + + tdSql.error(sql1) + tdSql.error(sql2) + tdSql.error(sql3) + tdSql.error(sql4) + tdSql.error(sql5) + + + # case TD-12164 + tdSql.error( "select elapsed(ts00 ,1s) from (select elapsed(ts,1s) ts00 from regular_table_1) ; " ) + tdSql.error( "select elapsed(ts ,1s) from (select elapsed(ts,1s) ts from regular_table_1) ; " ) + + tdSql.error( "select elapsed(ts00 ,1s) from (select elapsed(ts,1s) ts00 from stable_1 group by tbname ) ; " ) + tdSql.error( "select elapsed(ts ,1s) from (select elapsed(ts,1s) ts from stable_1 group by tbname) ; " ) + + + # stable + + tdSql.error("select elapsed(ts,10s) from (select ts from stable_1 ) group by tbname ;") + + tdSql.error("select elapsed(ts,10s) from (select ts ,max(q_int),tbname from stable_1 group by tbname order by ts ) interval(1s) group by tbname;") + + tdSql.error("select elapsed(ts,10s) from (select ts ,q_int,tbname from stable_1 order by ts ) interval(1s) group by tbname;") + + # mixup with aggregate + + querys = ["max(q_int)","min(q_int)" , "first(q_tinyint)", "first(*)","last(q_int)","last(*)","top(q_double,1)", + "bottom(q_float,1)","PERCENTILE(q_int,10)","APERCENTILE(q_int,10)" ,"elapsed(ts,10s)"] + + for index , query in enumerate(querys): + + sql1 = "select elapsed(ts,10s) from (select %s from sub_table1_1) where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(10s) fill(prev) ; " %(query) + sql2 = "select elapsed(ts,10s) from (select %s from stable_1 ) where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(10s) fill(prev) group by tbname; " %(query) + sql3 = "select elapsed(ts,10s) from (select %s from stable_1 group by tbname) where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(10s) fill(prev) group by tbname; " %(query) + + if query in ["interp(q_int)" ]: + # print(sql1 ) + # print(sql2) + tdSql.query(sql1) + tdSql.error(sql2) + else: + tdSql.error(sql1) + tdSql.error(sql2) + tdSql.error(sql3) + + tdSql.error("select elapsed(ts,10s) from (select ts,tbname from regular_table_1 order by ts ) where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev);") + + tdSql.error("select elapsed(ts,10s) from (select ts ,max(q_int),tbname from regular_table_1 order by ts ) where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev);") + + # ===============================================inner nest============================================ + + # sub table + + tdSql.query("select data from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from sub_table1_1 ); ") + tdSql.checkData(0,0,9) + + # tdSql.query("select data from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from sub_table1_1 \ + # 
where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + # tdSql.checkData(0,0,0.1) + + tdSql.query("select * from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 ); ") + tdSql.checkData(0,5,9) + + # tdSql.query("select * from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + # tdSql.checkData(0,0,0.1) + + tdSql.query("select max(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 ); ") + tdSql.checkData(0,0,9) + + # tdSql.query("select max(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(1) + # tdSql.checkData(0,0,0.1) + + # tdSql.query("select max(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from sub_empty_2 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(0) + + # tdSql.query("select max(data),min(data),avg(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(1) + + # tdSql.query("select ceil(data),floor(data),round(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + + # tdSql.query("select spread(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(1) + + # tdSql.query("select diff(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(599) + + # tdSql.query("select DERIVATIVE(data ,1s ,1) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(598) + + # tdSql.query("select ceil(data)from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + + # tdSql.query("select floor(data)from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where 
ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + + # tdSql.query("select round(data)from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + + # tdSql.query("select data*10+2 from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + + # tdSql.query("select data*10+2 from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + + def query_session_windows(self): + + # case TD-12344 + # session not support stable + tdSql.error('select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" session(ts ,10s) group by tbname,ind order by ts asc ') + + tdSql.query('select elapsed(ts,10s) from sub_table1_1 session(ts,1w) ; ') + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + tdSql.query('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" session(ts,1w) ; ') + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + tdSql.error('select elapsed(ts,10s) from ( select * from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") session(ts,1w) ; ') + + tdSql.error('select elapsed(ts,10s) from ( select ts ,q_int from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") session(ts,1w) ; ') + + tdSql.error('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(20s) fill (next) session(ts,1w) ; ') + + tdSql.query('select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" session(ts,1w) ; ') + tdSql.checkRows(0) + + # windows state + # not support stable + + tdSql.error('select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" state_window(q_int) group by tbname,ind order by ts asc ') + + tdSql.query('select elapsed(ts,10s) from sub_table1_1 state_window(q_int) ; ') + tdSql.checkRows(10) + tdSql.checkData(0,0,0) + tdSql.query('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" state_window(q_int) ; ') + tdSql.checkRows(10) + tdSql.checkData(0,0,0) + + # tdSql.error('select elapsed(ts,10s) from ( select * from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") state_window(q_int) ; ') + + # tdSql.error('select elapsed(ts,10s) from ( select ts ,q_int from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") state_window(q_int) ; ') + + # tdSql.error('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(20s) fill (next) state_window(q_int) ; ') + + # tdSql.query('select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" state_window(q_int); ') + # 
tdSql.checkRows(0) + + + def continuous_query(self): + tdSql.error('create table elapsed_t as select elapsed(ts) from sub_table1_1 interval(1m) sliding(30s);') + tdSql.error('create table elapsed_tb as select elapsed(ts) from stable_1 interval(1m) sliding(30s) group by tbname;') + tdSql.error('create table elapsed_tc as select elapsed(ts) from stable_1 interval(10s) sliding(5s) interval(1m) sliding(30s) group by tbname;') + + def query_precision(self): + def generate_data(precision="ms"): + + tdSql.execute("create database if not exists db_%s precision '%s';" %(precision, precision)) + tdSql.execute("use db_%s;" %precision) + tdSql.execute("create stable db_%s.st (ts timestamp , id int) tags(ind int);"%precision) + tdSql.execute("create table db_%s.tb1 using st tags(1);"%precision) + tdSql.execute("create table db_%s.tb2 using st tags(2);"%precision) + + if precision == "ms": + start_ts = self.ts + step = 10000 + elif precision == "us": + start_ts = self.ts*1000 + step = 10000000 + elif precision == "ns": + start_ts = self.ts*1000000 + step = 10000000000 + else: + pass + + for i in range(10): + + sql1 = "insert into db_%s.tb1 values (%d,%d)"%(precision, start_ts+i*step,i) + sql2 = "insert into db_%s.tb2 values (%d,%d)"%(precision, start_ts+i*step,i) + tdSql.execute(sql1) + tdSql.execute(sql2) + + time_units = ["10s","10a","10u","10b"] + + precision_list = ["ms","us","ns"] + for pres in precision_list: + generate_data(pres) + + for index,unit in enumerate(time_units): + + if pres == "ms": + if unit in ["10u","10b"]: + tdSql.error("select elapsed(ts,%s) from db_%s.st group by tbname "%(unit,pres)) + else: + tdSql.query("select elapsed(ts,%s) from db_%s.st group by tbname "%(unit,pres)) + elif pres == "us" and unit in ["10b"]: + tdSql.error("select elapsed(ts,%s) from db_%s.st group by tbname "%(unit,pres)) + else: + tdSql.query("select elapsed(ts,%s) from db_%s.st group by tbname "%(unit,pres)) + basic_result = 9 + tdSql.checkData(0,0,basic_result*pow(1000,index)) + + def run(self): + tdSql.prepare() + self.prepare_data() + self.abnormal_common_test() + self.abnormal_use_test() + self.query_filter() + # self.query_interval() + self.query_mix_common() + self.query_mix_Aggregate() + self.query_mix_select() + self.query_mix_compute() + self.query_mix_arithmetic() + # self.query_with_join() + # self.query_with_union() + self.query_nest() + self.query_session_windows() + self.continuous_query() + self.query_precision() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + diff --git a/tests/system-test/2-query/first.py b/tests/system-test/2-query/first.py new file mode 100644 index 0000000000000000000000000000000000000000..7227d1afb5e22f68af90fb9d2192eb7a4a088c96 --- /dev/null +++ b/tests/system-test/2-query/first.py @@ -0,0 +1,152 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') + tdSql.execute("create table test1 using test tags('beijing')") + tdSql.execute("insert into test1(ts) values(%d)" % (self.ts - 1)) + + # first verification + # bug TD-15957 + tdSql.query("select first(*) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 1, None) + + tdSql.query("select first(col1) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col2) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col3) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col4) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col11) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col12) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col13) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col14) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col5) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col6) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col7) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col8) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col9) from test1") + tdSql.checkRows(0) + + for i in range(self.rowNum): + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + tdSql.query("select first(*) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 1) + + tdSql.query("select first(col1) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query("select first(col2) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query("select first(col3) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query("select first(col4) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query("select first(col11) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query("select first(col12) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query("select first(col13) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query("select first(col14) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query("select first(col5) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.1) + + tdSql.query("select first(col6) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.1) + + tdSql.query("select first(col7) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, False) + + 
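# The per-column first() checks in this block can also be driven from a
# list; a minimal sketch, assuming the same tdSql helper and the rows
# inserted above (col1-col4 and col11-col14 all hold 1 in the first row):
for col in ["col1", "col2", "col3", "col4", "col11", "col12", "col13", "col14"]:
    tdSql.query("select first(%s) from test1" % col)
    tdSql.checkRows(1)
    tdSql.checkData(0, 0, 1)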
tdSql.query("select first(col8) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'taosdata1') + + tdSql.query("select first(col9) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, '涛思数据1') + + + tdSql.query("select first(*),last(*) from test1 where ts < 23 interval(1s)") + tdSql.checkRows(0) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/function_diff.py b/tests/system-test/2-query/function_diff.py new file mode 100644 index 0000000000000000000000000000000000000000..325bd2bc8ebd79f3e58daf6690492dc8ca329dda --- /dev/null +++ b/tests/system-test/2-query/function_diff.py @@ -0,0 +1,432 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import subprocess +import random +import math +import numpy as np +import inspect +import re + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def diff_query_form(self, col="c1", alias="", table_expr="t1", condition=""): + + ''' + diff function: + :param col: string, column name, required parameter; + :param alias: string, another name for the result column, or an additional function; + :param table_expr: string or expression, data source (e.g. table/stable name, result set), required parameter; + :param condition: expression; + :param args: other functions, like ', last(col)', or another name for the result column, like 'c2' + :return: diff query statement, default: select diff(c1) from t1 + ''' + + return f"select diff({col}) {alias} from {table_expr} {condition}" + + def checkdiff(self, col="c1", alias="", table_expr="t1", condition=""): + line = sys._getframe().f_back.f_lineno + pre_sql = self.diff_query_form( + col=col, table_expr=table_expr, condition=condition + ).replace("diff", "count") + tdSql.query(pre_sql) + + if tdSql.queryRows == 0: + tdSql.query(self.diff_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + print(f"case in {line}: ", end='') + tdSql.checkRows(0) + return + + if "order by tbname" in condition: + tdSql.error(self.diff_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + return + + if "group" in condition: + + tb_condition = condition.split("group by")[1].split(" ")[1] + tdSql.query(f"select distinct {tb_condition} from {table_expr}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + clear_condition = re.sub('order by [0-9a-z]*|slimit [0-9]*|soffset [0-9]*', "", condition) + + pre_row = 0 + for i in range(query_rows): + group_name = query_result[i][0] + if "where" in clear_condition: + pre_condition = re.sub('group by [0-9a-z]*', f"{tb_condition}='{group_name}'", clear_condition) + else: + pre_condition = "where " + re.sub('group by [0-9a-z]*',f"{tb_condition}='{group_name}'", clear_condition) + + tdSql.query(f"select {col} {alias} from 
{table_expr} {pre_condition}") + pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + pre_diff = np.diff(pre_data) + # trans precision for data + tdSql.query(self.diff_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + for j in range(len(pre_diff)): + print(f"case in {line}:", end='') + if isinstance(pre_diff[j] , float) : + pass + else: + tdSql.checkData(pre_row+j, 1, pre_diff[j] ) + pre_row += len(pre_diff) + return + elif "union" in condition: + union_sql_0 = self.diff_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + ).split("union all")[0] + + union_sql_1 = self.diff_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + ).split("union all")[1] + + tdSql.query(union_sql_0) + union_diff_0 = tdSql.queryResult + row_union_0 = tdSql.queryRows + + tdSql.query(union_sql_1) + union_diff_1 = tdSql.queryResult + + tdSql.query(self.diff_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + if i < row_union_0: + tdSql.checkData(i, 0, union_diff_0[i][0]) + else: + tdSql.checkData(i, 0, union_diff_1[i-row_union_0][0]) + return + + else: + tdSql.query(f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0 + pre_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + pre_diff = np.diff(pre_result)[offset_val:] + tdSql.query(self.diff_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + + for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + if isinstance(pre_diff[i] , float ): + pass + else: + tdSql.checkData(i, 0, pre_diff[i]) + + pass + + def diff_current_query(self) : + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1~6: numeric col:int/bigint/tinyint/smallint/float/double + self.checkdiff() + case2 = {"col": "c2"} + self.checkdiff(**case2) + case3 = {"col": "c5"} + self.checkdiff(**case3) + case4 = {"col": "c7"} + self.checkdiff(**case4) + case5 = {"col": "c8"} + self.checkdiff(**case5) + case6 = {"col": "c9"} + self.checkdiff(**case6) + + # case7~8: nested query + # case7 = {"table_expr": "(select c1 from stb1)"} + # self.checkdiff(**case7) + # case8 = {"table_expr": "(select diff(c1) c1 from stb1 group by tbname)"} + # self.checkdiff(**case8) + + # case9~10: mix with tbname/ts/tag/col + # case9 = {"alias": ", tbname"} + # self.checkdiff(**case9) + # case10 = {"alias": ", _c0"} + # self.checkdiff(**case10) + # case11 = {"alias": ", st1"} + # self.checkdiff(**case11) + # case12 = {"alias": ", c1"} + # self.checkdiff(**case12) + + # case13~15: with single condition + case13 = {"condition": "where c1 <= 10"} + self.checkdiff(**case13) + case14 = {"condition": "where c6 in (0, 1)"} + self.checkdiff(**case14) + case15 = {"condition": "where c1 between 1 and 10"} + self.checkdiff(**case15) + + # case16: with multi-condition + case16 = {"condition": "where c6=1 or c6 =0"} + self.checkdiff(**case16) + + # case17: only support normal table join + case17 = { + "col": "t1.c1", + "table_expr": "t1, t2", + "condition": "where t1.ts=t2.ts" + } + self.checkdiff(**case17) + # case18~19: with group by + # case18 = { + # "table_expr": "t1", + # "condition": "group by c6" + # } + # 
self.checkdiff(**case18) + # case19 = { + # "table_expr": "stb1", + # "condition": "partition by tbname" # partition by tbname + # } + # self.checkdiff(**case19) + + # # case20~21: with order by + # case20 = {"condition": "order by ts"} + # self.checkdiff(**case20) + + # # case22: with union + # case22 = { + # "condition": "union all select diff(c1) from t2" + # } + # self.checkdiff(**case22) + + # case23: with limit/slimit + case23 = { + "condition": "limit 1" + } + self.checkdiff(**case23) + # case24 = { + # "table_expr": "stb1", + # "condition": "group by tbname slimit 1 soffset 1" + # } + # self.checkdiff(**case24) + + pass + + def diff_error_query(self) -> None: + # unusual test + # + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + # + # form test + tdSql.error(self.diff_query_form(col="")) # no col + tdSql.error("diff(c1) from stb1") # no select + tdSql.error("select diff from t1") # no diff condition + tdSql.error("select diff c1 from t1") # no brackets + tdSql.error("select diff(c1) t1") # no from + tdSql.error("select diff( c1 ) from ") # no table_expr + # tdSql.error(self.diff_query_form(col="st1")) # tag col + tdSql.query("select diff(st1) from t1 ") + # tdSql.error(self.diff_query_form(col=1)) # col is a value + tdSql.error(self.diff_query_form(col="'c1'")) # col is a string + tdSql.error(self.diff_query_form(col=None)) # col is NULL 1 + tdSql.error(self.diff_query_form(col="NULL")) # col is NULL 2 + tdSql.error(self.diff_query_form(col='""')) # col is "" + tdSql.error(self.diff_query_form(col='c%')) # col is special char 1 + tdSql.error(self.diff_query_form(col='c_')) # col is special char 2 + tdSql.error(self.diff_query_form(col='c.')) # col is special char 3 + tdSql.error(self.diff_query_form(col='c3')) # timestamp col + tdSql.error(self.diff_query_form(col='ts')) # Primary key + tdSql.error(self.diff_query_form(col='avg(c1)')) # expr col + # tdSql.error(self.diff_query_form(col='c6')) # bool col + tdSql.query("select diff(c6) from t1") + tdSql.error(self.diff_query_form(col='c4')) # binary col + tdSql.error(self.diff_query_form(col='c10')) # nchar col + tdSql.error(self.diff_query_form(col='c10')) # not table_expr col + tdSql.error(self.diff_query_form(col='t1')) # tbname + tdSql.error(self.diff_query_form(col='stb1')) # stbname + tdSql.error(self.diff_query_form(col='db')) # database name + # tdSql.error(self.diff_query_form(col=True)) # col is BOOL 1 + # tdSql.error(self.diff_query_form(col='True')) # col is BOOL 2 + tdSql.error(self.diff_query_form(col='*')) # col is all col + tdSql.error("select diff[c1] from t1") # sql form error 1 + tdSql.error("select diff{c1} from t1") # sql form error 2 + tdSql.error(self.diff_query_form(col="[c1]")) # sql form error 3 + # tdSql.error(self.diff_query_form(col="c1, c2")) # sql form error 3 + # tdSql.error(self.diff_query_form(col="c1, 2")) # sql form error 3 + tdSql.error(self.diff_query_form(alias=", count(c1)")) # mix with aggregate function 1 + tdSql.error(self.diff_query_form(alias=", avg(c1)")) # mix with aggregate function 2 + tdSql.error(self.diff_query_form(alias=", min(c1)")) # mix with select function 1 + tdSql.error(self.diff_query_form(alias=", top(c1, 5)")) # mix with select function 2 + tdSql.error(self.diff_query_form(alias=", spread(c1)")) # mix with calculation function 1 + tdSql.error(self.diff_query_form(alias=", diff(c1)")) # mix with calculation function 2 + # tdSql.error(self.diff_query_form(alias=" + 2")) 
# mix with arithmetic 1 + tdSql.error(self.diff_query_form(alias=" + avg(c1)")) # mix with arithmetic 2 + tdSql.error(self.diff_query_form(alias=", c2")) # mix with other 1 + # tdSql.error(self.diff_query_form(table_expr="stb1")) # select stb directly + stb_join = { + "col": "stb1.c1", + "table_expr": "stb1, stb2", + "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" + } + tdSql.error(self.diff_query_form(**stb_join)) # stb join + interval_sql = { + "condition": "where ts>0 and ts < now interval(1h) fill(next)" + } + tdSql.error(self.diff_query_form(**interval_sql)) # interval + group_normal_col = { + "table_expr": "t1", + "condition": "group by c6" + } + tdSql.error(self.diff_query_form(**group_normal_col)) # group by normal col + slimit_soffset_sql = { + "table_expr": "stb1", + "condition": "group by tbname slimit 1 soffset 1" + } + # tdSql.error(self.diff_query_form(**slimit_soffset_sql)) + order_by_tbname_sql = { + "table_expr": "stb1", + "condition": "group by tbname order by tbname" + } + tdSql.error(self.diff_query_form(**order_by_tbname_sql)) + + pass + + def diff_test_data(self, tbnum:int, data_row:int, basetime:int) -> None : + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def diff_test_table(self,tbnum: int) -> None : + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + pass + + def diff_test_run(self) : + tdLog.printNoPrefix("==========TD-10594==========") + tbnum = 10 + nowtime = int(round(time.time() * 1000)) + per_table_rows = 10 + self.diff_test_table(tbnum) + + tdLog.printNoPrefix("######## no data test:") + self.diff_current_query() + self.diff_error_query() + + tdLog.printNoPrefix("######## insert only NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + self.diff_current_query() + self.diff_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") + self.diff_test_table(tbnum) + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + tdSql.execute(f"insert into t1(ts, 
c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + self.diff_current_query() + self.diff_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") + self.diff_test_table(tbnum) + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") + self.diff_current_query() + self.diff_error_query() + + tdLog.printNoPrefix("######## insert data without NULL data test:") + self.diff_test_table(tbnum) + self.diff_test_data(tbnum, per_table_rows, nowtime) + self.diff_current_query() + self.diff_error_query() + + + tdLog.printNoPrefix("######## insert data mix with NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + self.diff_current_query() + self.diff_error_query() + + + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + self.diff_current_query() + self.diff_error_query() + + def run(self): + import traceback + try: + # run in develop branch + self.diff_test_run() + pass + except Exception as e: + traceback.print_exc() + raise e + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/2-query/histogram.py b/tests/system-test/2-query/histogram.py new file mode 100644 index 0000000000000000000000000000000000000000..2c203bdceb1c6f180fc3e653aa1dd6c62512d0e2 --- /dev/null +++ b/tests/system-test/2-query/histogram.py @@ -0,0 +1,3554 @@ +import datetime + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +PRIMARY_COL = "ts" + +INT_COL = "c1" +BINT_COL = "c2" +SINT_COL = "c3" +TINT_COL = "c4" +FLOAT_COL = "c5" +DOUBLE_COL = "c6" +BOOL_COL = "c7" + +BINARY_COL = "c8" +NCHAR_COL = "c9" +TS_COL = "c10" + +NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [ BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [ BOOL_COL, ] +TS_TYPE_COL = [ TS_COL, ] + +ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL ] + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor()) + + def __query_condition(self, tbname): + query_condition = [f"cast({col} as bigint)" for col in ALL_COL] + for num_col in NUM_COL: + query_condition.extend( + ( + f"{tbname}.{num_col}", + f"abs( {tbname}.{num_col} )", + f"acos( {tbname}.{num_col} )", + f"asin( {tbname}.{num_col} )", + f"atan( {tbname}.{num_col} )", + f"avg( {tbname}.{num_col} )", + f"ceil( {tbname}.{num_col} )", + f"cos( {tbname}.{num_col} )", + f"count( {tbname}.{num_col} )", + f"floor( {tbname}.{num_col} )", + f"log( {tbname}.{num_col}, {tbname}.{num_col})", + f"max( {tbname}.{num_col} )", + f"min( {tbname}.{num_col} )", + f"pow( {tbname}.{num_col}, 2)", + f"round( {tbname}.{num_col} )", + f"sum( {tbname}.{num_col} 
)", + f"sin( {tbname}.{num_col} )", + f"sqrt( {tbname}.{num_col} )", + f"tan( {tbname}.{num_col} )", + f"cast( {tbname}.{num_col} as timestamp)", + ) + ) + [ query_condition.append(f"{num_col} + {any_col}") for any_col in ALL_COL ] + for char_col in CHAR_COL: + query_condition.extend( + ( + f"count({tbname}.{char_col})", + f"sum(cast({tbname}.{char_col}) as bigint)", + f"max(cast({tbname}.{char_col}) as bigint)", + f"min(cast({tbname}.{char_col}) as bigint)", + f"avg(cast({tbname}.{char_col}) as bigint)", + ) + ) + query_condition.extend( + ( + 1010, + ) + ) + + return query_condition + + def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False): + table_reference = tb_list[0] + join_condition = table_reference + join = "inner join" if INNER else "join" + for i in range(len(tb_list[1:])): + join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}" + + return join_condition + + def __where_condition(self, col=None, tbname=None, query_conditon=None): + if query_conditon and isinstance(query_conditon, str): + if query_conditon.startswith("count"): + query_conditon = query_conditon[6:-1] + elif query_conditon.startswith("max"): + query_conditon = query_conditon[4:-1] + elif query_conditon.startswith("sum"): + query_conditon = query_conditon[4:-1] + elif query_conditon.startswith("min"): + query_conditon = query_conditon[4:-1] + + if query_conditon: + return f" where {query_conditon} is not null" + if col in NUM_COL: + return f" where abs( {tbname}.{col} ) >= 0" + if col in CHAR_COL: + return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' " + if col in BOOLEAN_COL: + return f" where {tbname}.{col} in (false, true) " + if col in TS_TYPE_COL or col in PRIMARY_COL: + return f" where cast( {tbname}.{col} as binary(16) ) is not null " + + return "" + + def __group_condition(self, col, having = None): + if isinstance(col, str): + if col.startswith("count"): + col = col[6:-1] + elif col.startswith("max"): + col = col[4:-1] + elif col.startswith("sum"): + col = col[4:-1] + elif col.startswith("min"): + col = col[4:-1] + return f" group by {col} having {having}" if having else f" group by {col} " + + def __single_sql(self, select_clause, from_clause, where_condition="", group_condition=""): + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]: + return + return f"select spread({select_clause}) from {from_clause} {where_condition} {group_condition}" + + @property + def __tb_list(self): + return [ + "ct1", + "ct4", + "t1", + "ct2", + "stb1", + ] + + def sql_list(self): + sqls = [] + __no_join_tblist = self.__tb_list + for tb in __no_join_tblist: + select_claus_list = self.__query_condition(tb) + for select_claus in select_claus_list: + group_claus = self.__group_condition(col=select_claus) + where_claus = self.__where_condition(query_conditon=select_claus) + having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null") + sqls.extend( + ( + self.__single_sql(select_claus, tb, where_claus, having_claus), + self.__single_sql(select_claus, tb,), + self.__single_sql(select_claus, tb, where_condition=where_claus), + self.__single_sql(select_claus, tb, group_condition=group_claus), + ) + ) + + # return filter(None, sqls) + return list(filter(None, sqls)) + + def __get_type(self, col): + if tdSql.cursor.istype(col, "BOOL"): + return "BOOL" + if tdSql.cursor.istype(col, "INT"): + return "INT" + if tdSql.cursor.istype(col, 
"BIGINT"): + return "BIGINT" + if tdSql.cursor.istype(col, "TINYINT"): + return "TINYINT" + if tdSql.cursor.istype(col, "SMALLINT"): + return "SMALLINT" + if tdSql.cursor.istype(col, "FLOAT"): + return "FLOAT" + if tdSql.cursor.istype(col, "DOUBLE"): + return "DOUBLE" + if tdSql.cursor.istype(col, "BINARY"): + return "BINARY" + if tdSql.cursor.istype(col, "NCHAR"): + return "NCHAR" + if tdSql.cursor.istype(col, "TIMESTAMP"): + return "TIMESTAMP" + if tdSql.cursor.istype(col, "JSON"): + return "JSON" + if tdSql.cursor.istype(col, "TINYINT UNSIGNED"): + return "TINYINT UNSIGNED" + if tdSql.cursor.istype(col, "SMALLINT UNSIGNED"): + return "SMALLINT UNSIGNED" + if tdSql.cursor.istype(col, "INT UNSIGNED"): + return "INT UNSIGNED" + if tdSql.cursor.istype(col, "BIGINT UNSIGNED"): + return "BIGINT UNSIGNED" + + def spread_check(self): + sqls = self.sql_list() + tdLog.printNoPrefix("===step 1: curent case, must return query OK") + for i in range(len(sqls)): + tdLog.info(f"sql: {sqls[i]}") + tdSql.query(sqls[i]) + + def __test_current(self): + tdSql.query("select spread(ts) from ct1") + tdSql.checkRows(1) + tdSql.query("select spread(c1) from ct2") + tdSql.checkRows(1) + tdSql.query("select spread(c1) from ct4 group by c1") + tdSql.checkRows(self.rows + 3) + tdSql.query("select spread(c1) from ct4 group by c7") + tdSql.checkRows(3) + tdSql.query("select spread(ct2.c1) from ct4 join ct2 on ct4.ts=ct2.ts") + tdSql.checkRows(1) + + self.spread_check() + + def __test_error(self): + + tdLog.printNoPrefix("===step 0: err case, must return err") + tdSql.error( "select spread() from ct1" ) + tdSql.error( "select spread(1, 2) from ct2" ) + tdSql.error( f"select spread({NUM_COL[0]}, {NUM_COL[1]}) from ct4" ) + tdSql.error( f"select spread({BOOLEAN_COL[0]}) from t1" ) + tdSql.error( f"select spread({CHAR_COL[0]}) from stb1" ) + + # tdSql.error( ''' select spread(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10']) + # from ct1 + # where ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null + # group by ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] + # having ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null ''' ) + # tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ") + + def all_test(self): + self.__test_error() + self.__test_current() + + def __create_tb(self): + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2} + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in 
range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * 
pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) + + + + + +################################################################### +# Copyright (c) 2021 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-11222]: Histogram function + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + print("running {}".format(__file__)) + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db") + tdSql.execute('use db') + + #Prepare data + tdSql.execute("create stable stb (col_timestamp timestamp, col_tinyint tinyint, col_smallint smallint, col_int int, col_bigint bigint, col_float float, col_double double, col_bool bool, col_binary binary(10), col_nchar nchar(10)) \ + tags(tag_timestamp timestamp, tag_tinyint tinyint, tag_smallint smallint, tag_int int, tag_bigint bigint, tag_float float, tag_double double, tag_bool bool, tag_binary binary(10), tag_nchar nchar(10));") + tdSql.execute("create table ctb using stb tags (now, 1, 1, 1, 1, 1.0, 1.0, true, 'abc', 'abc');") + tdSql.execute("create table tb (col_timestamp timestamp, col_tinyint tinyint, col_smallint smallint, col_int int, col_bigint bigint, col_float float, col_double double, col_bool bool, col_binary binary(10), col_nchar nchar(10));") + + tdSql.execute("insert into ctb values (now, -9, -9, -9, -9, -9.5, -9.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 1s, -1, -1, -1, -1, -1.5, -1.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 2s, 1, 1, 1, 1, 1.5, 1.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 3s, 2, 2, 2, 2, 2.5, 2.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 4s, 3, 3, 3, 3, 3.5, 3.5, true, 'abc', 'abc');") + tdSql.execute("insert 
into ctb values (now + 5s, 4, 4, 4, 4, 4.5, 4.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 6s, 5, 5, 5, 5, 5.5, 5.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 7s, 6, 6, 6, 6, 6.5, 6.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 8s, 7, 7, 7, 7, 7.5, 7.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 9s, 8, 8, 8, 8, 8.5, 8.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 10s, 9, 9, 9, 9, 9.5, 9.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 11s, 10, 10, 10, 10, 10.5, 10.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 12s, 15, 15, 15, 15, 15.5, 15.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 13s, 20, 20, 20, 20, 20.5, 20.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 14s, 99, 99, 99, 99, 99.5, 99.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 15s, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);") + + tdSql.execute("insert into tb values (now, -9, -9, -9, -9, -9.5, -9.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 1s, -1, -1, -1, -1, -1.5, -1.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 2s, 1, 1, 1, 1, 1.5, 1.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 3s, 2, 2, 2, 2, 2.5, 2.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 4s, 3, 3, 3, 3, 3.5, 3.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 5s, 4, 4, 4, 4, 4.5, 4.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 6s, 5, 5, 5, 5, 5.5, 5.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 7s, 6, 6, 6, 6, 6.5, 6.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 8s, 7, 7, 7, 7, 7.5, 7.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 9s, 8, 8, 8, 8, 8.5, 8.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 10s, 9, 9, 9, 9, 9.5, 9.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 11s, 10, 10, 10, 10, 10.5, 10.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 12s, 15, 15, 15, 15, 15.5, 15.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 13s, 20, 20, 20, 20, 20.5, 20.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 14s, 99, 99, 99, 99, 99.5, 99.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 15s, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);") + + #execute query + print("============== STEP 1: column types ================== ") + #Supported column types + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7]", 0) from tb;') + tdSql.checkRows(3); + + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5,7]", 0) from tb;') + tdSql.checkRows(3); + + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.checkRows(3); + 
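# A note on the expected results: "[1,3,5,7]" describes len(bins) - 1 = 3
# buckets, and the per-bucket counts verified in STEP 2 below are consistent
# with half-open (lower, upper] buckets over the 15 sample values inserted
# above. A minimal cross-check sketch (the test itself hard-codes the counts):
samples = [-9, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 99]
edges = [1, 3, 5, 7]
expected = [sum(1 for v in samples if lo < v <= hi) for lo, hi in zip(edges, edges[1:])]
assert expected == [2, 2, 2]  # matches the '{"lower_bin":..., "count":...}' rows below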
tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7]", 0) from tb;') + tdSql.checkRows(3); + + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7]", 0) from tb;') + tdSql.checkRows(3); + + tdSql.query('select histogram(col_float, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_float, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_float, "user_input", "[1,3,5,7]", 0) from tb;') + tdSql.checkRows(3); + + tdSql.query('select histogram(col_double, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_double, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_double, "user_input", "[1,3,5,7]", 0) from tb;') + tdSql.checkRows(3); + + #Unsupported column types + tdSql.error('select histogram(col_timestamp, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_timestamp, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(col_timestamp, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(col_bool, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_bool, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(col_bool, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(col_binary, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_binary, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(col_binary, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(col_nchar, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_nchar, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(col_nchar, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(col, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(col, "user_input", "[1,3,5,7]", 0) from tb;') + + #Unsupported tags + tdSql.error('select histogram(tag_timestamp, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(tag_timestamp, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(tag_timestamp, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(tag_tinyint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(tag_tinyint, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(tag_tinyint, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(tag_smallint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(tag_smallint, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(tag_smallint, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0) from tb;') + + 
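# The remaining tag columns are rejected the same way; a compact sketch that
# loops over what the statements below spell out one by one:
for tag_col in ["tag_bigint", "tag_float", "tag_double", "tag_bool", "tag_binary", "tag_nchar"]:
    for tbl in ["stb", "ctb", "tb"]:
        tdSql.error('select histogram(%s, "user_input", "[1,3,5,7]", 0) from %s;' % (tag_col, tbl))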
tdSql.error('select histogram(tag_bigint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(tag_bigint, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(tag_bigint, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(tag_float, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(tag_float, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(tag_float, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(tag_double, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(tag_double, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(tag_double, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(tag_bool, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(tag_bool, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(tag_bool, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(tag_binary, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(tag_binary, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(tag_binary, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(tag_nchar, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(tag_nchar, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(tag_nchar, "user_input", "[1,3,5,7]", 0) from tb;') + + + print("============== STEP 2: bin types ================== ") + ## user_input ## + #TINYINT + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5]", 0) from stb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5]", 0) from ctb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5]", 0) from tb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + + + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + + tdSql.query('select histogram(col_tinyint, "user_input", "[0,10,20,100]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}'); + tdSql.checkData(1, 0, 
'{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[0,10,20,100]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[0,10,20,100]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + + tdSql.query('select histogram(col_tinyint, "user_input", "[-10,10,20,100]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[-10,10,20,100]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[-10,10,20,100]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + + tdSql.query('select histogram(col_tinyint, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}'); + + tdSql.query('select histogram(col_tinyint, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, 
"count":2}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + + #SMALLINT + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5]", 0) from stb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5]", 0) from ctb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5]", 0) from tb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5,7]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + + tdSql.query('select histogram(col_smallint, "user_input", "[0,10,20,100]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_smallint, "user_input", "[0,10,20,100]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_smallint, "user_input", "[0,10,20,100]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + + tdSql.query('select histogram(col_smallint, "user_input", "[-10,10,20,100]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_smallint, "user_input", "[-10,10,20,100]", 0) from ctb;') + tdSql.checkRows(3); + 
tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_smallint, "user_input", "[-10,10,20,100]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + + tdSql.query('select histogram(col_smallint, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}'); + tdSql.query('select histogram(col_smallint, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}'); + tdSql.query('select histogram(col_smallint, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}'); + + tdSql.query('select histogram(col_smallint, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + tdSql.query('select histogram(col_smallint, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + tdSql.query('select histogram(col_smallint, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + + #INT + tdSql.query('select histogram(col_int, "user_input", "[1,3,5]", 0) from stb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5]", 0) from ctb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5]", 0) from tb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7]", 0) from stb;') + 
tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,10,20,100]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_int, "user_input", "[0,10,20,100]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_int, "user_input", "[0,10,20,100]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + + tdSql.query('select histogram(col_int, "user_input", "[-10,10,20,100]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_int, "user_input", "[-10,10,20,100]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_int, "user_input", "[-10,10,20,100]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + + tdSql.query('select histogram(col_int, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}'); + tdSql.query('select 
histogram(col_int, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}'); + + tdSql.query('select histogram(col_int, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + + #BIGINT + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5]", 0) from stb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5]", 0) from ctb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5]", 0) from tb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + + tdSql.query('select histogram(col_bigint, "user_input", "[0,10,20,100]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_bigint, "user_input", "[0,10,20,100]", 0) from ctb;') + 
tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_bigint, "user_input", "[0,10,20,100]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + + tdSql.query('select histogram(col_bigint, "user_input", "[-10,10,20,100]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_bigint, "user_input", "[-10,10,20,100]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_bigint, "user_input", "[-10,10,20,100]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + + tdSql.query('select histogram(col_bigint, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}'); + + tdSql.query('select histogram(col_bigint, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, 
"count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + + #FLOAT + tdSql.query('select histogram(col_float, "user_input", "[1,3,5]", 0) from stb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_float, "user_input", "[1,3,5]", 0) from ctb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_float, "user_input", "[1,3,5]", 0) from tb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + + tdSql.query('select histogram(col_float, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_float, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_float, "user_input", "[1,3,5,7]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + + tdSql.query('select histogram(col_float, "user_input", "[0,10,20,100]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":9}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}'); + tdSql.query('select histogram(col_float, "user_input", "[0,10,20,100]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":9}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}'); + tdSql.query('select histogram(col_float, "user_input", "[0,10,20,100]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":9}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}'); + + tdSql.query('select histogram(col_float, "user_input", "[-10,10,20,100]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}'); + tdSql.query('select histogram(col_float, "user_input", "[-10,10,20,100]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}'); + tdSql.query('select histogram(col_float, 
"user_input", "[-10,10,20,100]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}'); + + tdSql.query('select histogram(col_float, "user_input", "[-9.4,9.6,20.4,99.9]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-9.4, "upper_bin":9.6, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.6, "upper_bin":20.4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20.4, "upper_bin":99.9, "count":2}'); + tdSql.query('select histogram(col_float, "user_input", "[-9.4,9.6,20.4,99.9]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-9.4, "upper_bin":9.6, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.6, "upper_bin":20.4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20.4, "upper_bin":99.9, "count":2}'); + tdSql.query('select histogram(col_float, "user_input", "[-9.4,9.6,20.4,99.9]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-9.4, "upper_bin":9.6, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.6, "upper_bin":20.4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20.4, "upper_bin":99.9, "count":2}'); + + tdSql.query('select histogram(col_float, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + tdSql.query('select histogram(col_float, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + tdSql.query('select histogram(col_float, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + + #DOUBLE + tdSql.query('select histogram(col_double, "user_input", "[1,3,5]", 0) from stb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_double, "user_input", "[1,3,5]", 0) from ctb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_double, "user_input", "[1,3,5]", 0) from tb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + + tdSql.query('select histogram(col_double, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_double, 
"user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_double, "user_input", "[1,3,5,7]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + + tdSql.query('select histogram(col_double, "user_input", "[0,10,20,100]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":9}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}'); + tdSql.query('select histogram(col_double, "user_input", "[0,10,20,100]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":9}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}'); + tdSql.query('select histogram(col_double, "user_input", "[0,10,20,100]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":9}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}'); + + tdSql.query('select histogram(col_double, "user_input", "[-10,10,20,100]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}'); + tdSql.query('select histogram(col_double, "user_input", "[-10,10,20,100]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}'); + tdSql.query('select histogram(col_double, "user_input", "[-10,10,20,100]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}'); + + tdSql.query('select histogram(col_double, "user_input", "[-9.4,9.6,20.4,99.9]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-9.4, "upper_bin":9.6, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.6, "upper_bin":20.4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20.4, "upper_bin":99.9, "count":2}'); + tdSql.query('select histogram(col_double, "user_input", "[-9.4,9.6,20.4,99.9]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-9.4, "upper_bin":9.6, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.6, "upper_bin":20.4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20.4, "upper_bin":99.9, "count":2}'); + tdSql.query('select histogram(col_double, "user_input", "[-9.4,9.6,20.4,99.9]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-9.4, "upper_bin":9.6, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.6, "upper_bin":20.4, "count":2}'); + 
tdSql.checkData(2, 0, '{"lower_bin":20.4, "upper_bin":99.9, "count":2}'); + + tdSql.query('select histogram(col_double, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + tdSql.query('select histogram(col_double, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + tdSql.query('select histogram(col_double, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + + #ERROR CASE + tdSql.error('select histogram(col_double, 1, "[1,5,3,7]", 0) from stb;') + tdSql.error('select histogram(col_double, 1, "[1,5,3,7]", 0) from ctb;') + tdSql.error('select histogram(col_double, 1, "[1,5,3,7]", 0) from tb;') + tdSql.error('select histogram(col_double, -1.0, "[1,5,3,7]", 0) from stb;') + tdSql.error('select histogram(col_double, -1.0, "[1,5,3,7]", 0) from ctb;') + tdSql.error('select histogram(col_double, -1.0, "[1,5,3,7]", 0) from tb;') + tdSql.error('select histogram(col_double, true, "[1,5,3,7]", 0) from stb;') + tdSql.error('select histogram(col_double, false, "[1,5,3,7]", 0) from ctb;') + tdSql.error('select histogram(col_double, true, "[1,5,3,7]", 0) from tb;') + tdSql.error('select histogram(col_double, "user", "[1,5,3,7]", 0) from stb;') + tdSql.error('select histogram(col_double, "user", "[1,5,3,7]", 0) from ctb;') + tdSql.error('select histogram(col_double, "user", "[1,5,3,7]", 0) from tb;') + tdSql.error('select histogram(col_double, "user_input", "[1,5,3,7]", 0) from stb;') + tdSql.error('select histogram(col_double, "user_input", "[1,5,3,7]", 0) from ctb;') + tdSql.error('select histogram(col_double, "user_input", "[1,5,3,7]", 0) from tb;') + tdSql.error('select histogram(col_double, "user_input", "[1,-1,3,-3]", 0) from stb;') + tdSql.error('select histogram(col_double, "user_input", "[1,-1,3,-3]", 0) from ctb;') + tdSql.error('select histogram(col_double, "user_input", "[1,-1,3,-3]", 0) from tb;') + tdSql.error('select histogram(col_double, "user_input", "[1.0,5.5,3.3,7.7]", 0) from stb;') + tdSql.error('select histogram(col_double, "user_input", "[1.0,5.5,3.3,7.7]", 0) from ctb;') + tdSql.error('select histogram(col_double, "user_input", "[1.0,5.5,3.3,7.7]", 0) from tb;') + tdSql.error('select histogram(col_double, "user_input", "[1,1,1]", 0) from stb;') + tdSql.error('select histogram(col_double, "user_input", "[1,1,1]", 0) from ctb;') + tdSql.error('select histogram(col_double, "user_input", "[1,1,1]", 0) from tb;') + tdSql.error('select histogram(col_double, "user_input", "[-1,-1,1]", 0) from stb;') + tdSql.error('select histogram(col_double, "user_input", "[-1,-1,1]", 0) from ctb;') + tdSql.error('select histogram(col_double, "user_input", "[-1,-1,1]", 0) from tb;') + tdSql.error('select histogram(col_double, "user_input", "[false,3,5]", 0) from stb;') + tdSql.error('select 
histogram(col_double, "user_input", "[false,3,5]", 0) from ctb;') + tdSql.error('select histogram(col_double, "user_input", "[false,3,5]", 0) from tb;') + tdSql.error('select histogram(col_double, "user_input", "[1,true,5]", 0) from stb;') + tdSql.error('select histogram(col_double, "user_input", "[1,true,5]", 0) from ctb;') + tdSql.error('select histogram(col_double, "user_input", "[1,true,5]", 0) from tb;') + tdSql.error('select histogram(col_double, "user_input", "[1.0,"abc",5]", 0) from stb;') + tdSql.error('select histogram(col_double, "user_input", "[1.0,"abc",5]", 0) from ctb;') + tdSql.error('select histogram(col_double, "user_input", "[1.0,"abc",5]", 0) from tb;') + tdSql.error('select histogram(col_double, "user_input", "[1.0, 5, "中文"]", 0) from stb;') + tdSql.error('select histogram(col_double, "user_input", "[1.0, 5, "中文"]", 0) from ctb;') + tdSql.error('select histogram(col_double, "user_input", "[1.0, 5, "中文"]", 0) from tb;') + tdSql.error('select histogram(col_double, "user_input", "{1.0, 3.0, 5.0}", 0) from stb;') + tdSql.error('select histogram(col_double, "user_input", "{1.0, 3.0, 5.0}", 0) from ctb;') + tdSql.error('select histogram(col_double, "user_input", "{1.0, 3.0, 5.0}", 0) from tb;') + tdSql.error('select histogram(col_double, \'user_input\', \'{"start": 1.0, "width": 3.0, "count": 5, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_double, \'user_input\', \'{"start": 1.0, "width": 3.0, "count": 5, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_double, \'user_input\', \'{"start": 1.0, "width": 3.0, "count": 5, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_double, \'user_input\', \'{"start": 1.0, "factor": 3.0, "count": 5, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_double, \'user_input\', \'{"start": 1.0, "factor": 3.0, "count": 5, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_double, \'user_input\', \'{"start": 1.0, "factor": 3.0, "count": 5, "infinity": true}\', 0) from tb;') + + + ## linear_bins ## + #INTEGER + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1, "width": 3, "count": 8, "infinity": false}\', 0) from stb;') + tdSql.checkRows(8); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":4, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":4, "upper_bin":7, "count":3}'); + tdSql.checkData(2, 0, '{"lower_bin":7, "upper_bin":10, "count":3}'); + tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":13, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":13, "upper_bin":16, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":16, "upper_bin":19, "count":0}'); + tdSql.checkData(6, 0, '{"lower_bin":19, "upper_bin":22, "count":1}'); + tdSql.checkData(7, 0, '{"lower_bin":22, "upper_bin":25, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1, "width": 3, "count": 8, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(8); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":4, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":4, "upper_bin":7, "count":3}'); + tdSql.checkData(2, 0, '{"lower_bin":7, "upper_bin":10, "count":3}'); + tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":13, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":13, "upper_bin":16, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":16, "upper_bin":19, "count":0}'); + tdSql.checkData(6, 0, '{"lower_bin":19, "upper_bin":22, "count":1}'); + tdSql.checkData(7, 0, '{"lower_bin":22, "upper_bin":25, 
"count":0}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1, "width": 3, "count": 8, "infinity": false}\', 0) from tb;') + tdSql.checkRows(8); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":4, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":4, "upper_bin":7, "count":3}'); + tdSql.checkData(2, 0, '{"lower_bin":7, "upper_bin":10, "count":3}'); + tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":13, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":13, "upper_bin":16, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":16, "upper_bin":19, "count":0}'); + tdSql.checkData(6, 0, '{"lower_bin":19, "upper_bin":22, "count":1}'); + tdSql.checkData(7, 0, '{"lower_bin":22, "upper_bin":25, "count":0}'); + + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -10.0, "width": 3.0, "count": 8, "infinity": false}\', 0) from stb;') + tdSql.checkRows(8); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":-7, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":-7, "upper_bin":-4, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-4, "upper_bin":-1, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":2, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":2, "upper_bin":5, "count":3}'); + tdSql.checkData(5, 0, '{"lower_bin":5, "upper_bin":8, "count":3}'); + tdSql.checkData(6, 0, '{"lower_bin":8, "upper_bin":11, "count":2}'); + tdSql.checkData(7, 0, '{"lower_bin":11, "upper_bin":14, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -10.0, "width": 3.0, "count": 8, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(8); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":-7, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":-7, "upper_bin":-4, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-4, "upper_bin":-1, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":2, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":2, "upper_bin":5, "count":3}'); + tdSql.checkData(5, 0, '{"lower_bin":5, "upper_bin":8, "count":3}'); + tdSql.checkData(6, 0, '{"lower_bin":8, "upper_bin":11, "count":2}'); + tdSql.checkData(7, 0, '{"lower_bin":11, "upper_bin":14, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -10.0, "width": 3.0, "count": 8, "infinity": false}\', 0) from tb;') + tdSql.checkRows(8); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":-7, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":-7, "upper_bin":-4, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-4, "upper_bin":-1, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":2, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":2, "upper_bin":5, "count":3}'); + tdSql.checkData(5, 0, '{"lower_bin":5, "upper_bin":8, "count":3}'); + tdSql.checkData(6, 0, '{"lower_bin":8, "upper_bin":11, "count":2}'); + tdSql.checkData(7, 0, '{"lower_bin":11, "upper_bin":14, "count":0}'); + + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -2.5, "width": 0.5, "count": 8, "infinity": false}\', 0) from stb;') + tdSql.checkRows(8); + tdSql.checkData(0, 0, '{"lower_bin":-2.5, "upper_bin":-2, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":-2, "upper_bin":-1.5, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-1.5, "upper_bin":-1, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":0, "upper_bin":0.5, 
"count":0}'); + tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":1}'); + tdSql.checkData(7, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -2.5, "width": 0.5, "count": 8, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(8); + tdSql.checkData(0, 0, '{"lower_bin":-2.5, "upper_bin":-2, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":-2, "upper_bin":-1.5, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-1.5, "upper_bin":-1, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}'); + tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":1}'); + tdSql.checkData(7, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -2.5, "width": 0.5, "count": 8, "infinity": false}\', 0) from tb;') + tdSql.checkRows(8); + tdSql.checkData(0, 0, '{"lower_bin":-2.5, "upper_bin":-2, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":-2, "upper_bin":-1.5, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-1.5, "upper_bin":-1, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}'); + tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":1}'); + tdSql.checkData(7, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}'); + + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 4, "width": -0.5, "count": 10, "infinity": false}\', 0) from stb;') + tdSql.checkRows(10); + tdSql.checkData(0, 0, '{"lower_bin":3.5, "upper_bin":4, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":3.5, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":2.5, "upper_bin":3, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":2, "upper_bin":2.5, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":1.5, "upper_bin":2, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}'); + tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":1}'); + tdSql.checkData(7, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}'); + tdSql.checkData(8, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}'); + tdSql.checkData(9, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 4, "width": -0.5, "count": 10, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(10); + tdSql.checkData(0, 0, '{"lower_bin":3.5, "upper_bin":4, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":3.5, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":2.5, "upper_bin":3, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":2, "upper_bin":2.5, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":1.5, "upper_bin":2, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}'); + tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":1}'); + tdSql.checkData(7, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}'); + tdSql.checkData(8, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}'); + tdSql.checkData(9, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 4, "width": -0.5, "count": 10, "infinity": 
false}\', 0) from tb;') + tdSql.checkRows(10); + tdSql.checkData(0, 0, '{"lower_bin":3.5, "upper_bin":4, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":3.5, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":2.5, "upper_bin":3, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":2, "upper_bin":2.5, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":1.5, "upper_bin":2, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}'); + tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":1}'); + tdSql.checkData(7, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}'); + tdSql.checkData(8, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}'); + tdSql.checkData(9, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}'); + + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.9999, "infinity": false}\', 0) from stb;') + tdSql.checkRows(1); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.9999, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(1); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.9999, "infinity": false}\', 0) from tb;') + tdSql.checkRows(1); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}'); + + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.99999999999999999, "infinity": false}\', 0) from stb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":1.5, "upper_bin":2, "count":1}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.99999999999999999, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":1.5, "upper_bin":2, "count":1}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.99999999999999999, "infinity": false}\', 0) from tb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":1.5, "upper_bin":2, "count":1}'); + + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 5, "count": 5, "infinity": true}\', 0) from stb;') + tdSql.checkRows(7); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":0, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":5, "count":5}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":15, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":15, "upper_bin":20, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":20, "upper_bin":25, "count":0}'); + tdSql.checkData(6, 0, '{"lower_bin":25, "upper_bin":inf, "count":1}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 5, "count": 5, "infinity": true}\', 0) from ctb;') + tdSql.checkRows(7); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":0, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":5, "count":5}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":15, 
"count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":15, "upper_bin":20, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":20, "upper_bin":25, "count":0}'); + tdSql.checkData(6, 0, '{"lower_bin":25, "upper_bin":inf, "count":1}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 5, "count": 5, "infinity": true}\', 0) from tb;') + tdSql.checkRows(7); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":0, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":5, "count":5}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":15, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":15, "upper_bin":20, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":20, "upper_bin":25, "count":0}'); + tdSql.checkData(6, 0, '{"lower_bin":25, "upper_bin":inf, "count":1}'); + + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.76e+308, "width": 5, "count": 1, "infinity": true}\', 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-1.76e+308, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":-1.76e+308, "upper_bin":-1.76e+308, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-1.76e+308, "upper_bin":inf, "count":15}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.76e+308, "width": 5, "count": 1, "infinity": true}\', 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-1.76e+308, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":-1.76e+308, "upper_bin":-1.76e+308, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-1.76e+308, "upper_bin":inf, "count":15}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.76e+308, "width": 5, "count": 1, "infinity": true}\', 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-1.76e+308, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":-1.76e+308, "upper_bin":-1.76e+308, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-1.76e+308, "upper_bin":inf, "count":15}'); + + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 10, "width": -5, "count": 3, "infinity": true}\', 0) from stb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":10, "upper_bin":inf, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":5, "count":5}'); + tdSql.checkData(3, 0, '{"lower_bin":-5, "upper_bin":0, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":-5, "count":1}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 10, "width": -5, "count": 3, "infinity": true}\', 0) from ctb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":10, "upper_bin":inf, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":5, "count":5}'); + tdSql.checkData(3, 0, '{"lower_bin":-5, "upper_bin":0, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":-5, "count":1}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 10, "width": -5, "count": 3, "infinity": true}\', 0) from tb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":10, "upper_bin":inf, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":5, 
"count":5}'); + tdSql.checkData(3, 0, '{"lower_bin":-5, "upper_bin":0, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":-5, "count":1}'); + + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": false}\', 0) from stb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": false}\', 0) from tb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}'); + + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": true}\', 0) from stb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-7e+307, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}'); + tdSql.checkData(3, 0, '{"lower_bin":7e+307, "upper_bin":inf, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": true}\', 0) from ctb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-7e+307, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}'); + tdSql.checkData(3, 0, '{"lower_bin":7e+307, "upper_bin":inf, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": true}\', 0) from tb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-7e+307, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}'); + tdSql.checkData(3, 0, '{"lower_bin":7e+307, "upper_bin":inf, "count":0}'); + + #FLOATING NUMBER + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 1, "width": 3, "count": 8, "infinity": false}\', 0) from stb;') + tdSql.checkRows(8); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":4, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":4, "upper_bin":7, "count":3}'); + tdSql.checkData(2, 0, '{"lower_bin":7, "upper_bin":10, "count":3}'); + tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":13, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":13, "upper_bin":16, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":16, "upper_bin":19, "count":0}'); + tdSql.checkData(6, 0, '{"lower_bin":19, "upper_bin":22, "count":1}'); + tdSql.checkData(7, 0, '{"lower_bin":22, "upper_bin":25, "count":0}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 1, "width": 3, "count": 8, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(8); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":4, 
"count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":4, "upper_bin":7, "count":3}'); + tdSql.checkData(2, 0, '{"lower_bin":7, "upper_bin":10, "count":3}'); + tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":13, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":13, "upper_bin":16, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":16, "upper_bin":19, "count":0}'); + tdSql.checkData(6, 0, '{"lower_bin":19, "upper_bin":22, "count":1}'); + tdSql.checkData(7, 0, '{"lower_bin":22, "upper_bin":25, "count":0}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 1, "width": 3, "count": 8, "infinity": false}\', 0) from tb;') + tdSql.checkRows(8); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":4, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":4, "upper_bin":7, "count":3}'); + tdSql.checkData(2, 0, '{"lower_bin":7, "upper_bin":10, "count":3}'); + tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":13, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":13, "upper_bin":16, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":16, "upper_bin":19, "count":0}'); + tdSql.checkData(6, 0, '{"lower_bin":19, "upper_bin":22, "count":1}'); + tdSql.checkData(7, 0, '{"lower_bin":22, "upper_bin":25, "count":0}'); + + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -10.0, "width": 3.0, "count": 8, "infinity": false}\', 0) from stb;') + tdSql.checkRows(8); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":-7, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":-7, "upper_bin":-4, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-4, "upper_bin":-1, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":2, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":2, "upper_bin":5, "count":3}'); + tdSql.checkData(5, 0, '{"lower_bin":5, "upper_bin":8, "count":3}'); + tdSql.checkData(6, 0, '{"lower_bin":8, "upper_bin":11, "count":3}'); + tdSql.checkData(7, 0, '{"lower_bin":11, "upper_bin":14, "count":0}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -10.0, "width": 3.0, "count": 8, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(8); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":-7, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":-7, "upper_bin":-4, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-4, "upper_bin":-1, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":2, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":2, "upper_bin":5, "count":3}'); + tdSql.checkData(5, 0, '{"lower_bin":5, "upper_bin":8, "count":3}'); + tdSql.checkData(6, 0, '{"lower_bin":8, "upper_bin":11, "count":3}'); + tdSql.checkData(7, 0, '{"lower_bin":11, "upper_bin":14, "count":0}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -10.0, "width": 3.0, "count": 8, "infinity": false}\', 0) from tb;') + tdSql.checkRows(8); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":-7, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":-7, "upper_bin":-4, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-4, "upper_bin":-1, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":2, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":2, "upper_bin":5, "count":3}'); + tdSql.checkData(5, 0, '{"lower_bin":5, "upper_bin":8, "count":3}'); + tdSql.checkData(6, 0, '{"lower_bin":8, "upper_bin":11, "count":3}'); + tdSql.checkData(7, 0, '{"lower_bin":11, "upper_bin":14, "count":0}'); + + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -2.5, "width": 0.5, "count": 8, 
"infinity": false}\', 0) from stb;') + tdSql.checkRows(8); + tdSql.checkData(0, 0, '{"lower_bin":-2.5, "upper_bin":-2, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":-2, "upper_bin":-1.5, "count":1}'); + tdSql.checkData(2, 0, '{"lower_bin":-1.5, "upper_bin":-1, "count":0}'); + tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}'); + tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":0}'); + tdSql.checkData(7, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -2.5, "width": 0.5, "count": 8, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(8); + tdSql.checkData(0, 0, '{"lower_bin":-2.5, "upper_bin":-2, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":-2, "upper_bin":-1.5, "count":1}'); + tdSql.checkData(2, 0, '{"lower_bin":-1.5, "upper_bin":-1, "count":0}'); + tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}'); + tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":0}'); + tdSql.checkData(7, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -2.5, "width": 0.5, "count": 8, "infinity": false}\', 0) from tb;') + tdSql.checkRows(8); + tdSql.checkData(0, 0, '{"lower_bin":-2.5, "upper_bin":-2, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":-2, "upper_bin":-1.5, "count":1}'); + tdSql.checkData(2, 0, '{"lower_bin":-1.5, "upper_bin":-1, "count":0}'); + tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}'); + tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":0}'); + tdSql.checkData(7, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}'); + + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 4, "width": -0.5, "count": 10, "infinity": false}\', 0) from stb;') + tdSql.checkRows(10); + tdSql.checkData(0, 0, '{"lower_bin":3.5, "upper_bin":4, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":3.5, "count":1}'); + tdSql.checkData(2, 0, '{"lower_bin":2.5, "upper_bin":3, "count":0}'); + tdSql.checkData(3, 0, '{"lower_bin":2, "upper_bin":2.5, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":1.5, "upper_bin":2, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}'); + tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":0}'); + tdSql.checkData(7, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}'); + tdSql.checkData(8, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}'); + tdSql.checkData(9, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 4, "width": -0.5, "count": 10, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(10); + tdSql.checkData(0, 0, '{"lower_bin":3.5, "upper_bin":4, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":3.5, "count":1}'); + tdSql.checkData(2, 0, '{"lower_bin":2.5, "upper_bin":3, "count":0}'); + tdSql.checkData(3, 0, '{"lower_bin":2, "upper_bin":2.5, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":1.5, "upper_bin":2, "count":0}'); + 
tdSql.checkData(5, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}'); + tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":0}'); + tdSql.checkData(7, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}'); + tdSql.checkData(8, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}'); + tdSql.checkData(9, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 4, "width": -0.5, "count": 10, "infinity": false}\', 0) from tb;') + tdSql.checkRows(10); + tdSql.checkData(0, 0, '{"lower_bin":3.5, "upper_bin":4, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":3.5, "count":1}'); + tdSql.checkData(2, 0, '{"lower_bin":2.5, "upper_bin":3, "count":0}'); + tdSql.checkData(3, 0, '{"lower_bin":2, "upper_bin":2.5, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":1.5, "upper_bin":2, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}'); + tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":0}'); + tdSql.checkData(7, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}'); + tdSql.checkData(8, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}'); + tdSql.checkData(9, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}'); + + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.9999, "infinity": false}\', 0) from stb;') + tdSql.checkRows(1); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.9999, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(1); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.9999, "infinity": false}\', 0) from tb;') + tdSql.checkRows(1); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}'); + + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.99999999999999999, "infinity": false}\', 0) from stb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":1.5, "upper_bin":2, "count":0}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.99999999999999999, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":1.5, "upper_bin":2, "count":0}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.99999999999999999, "infinity": false}\', 0) from tb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":1.5, "upper_bin":2, "count":0}'); + + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 0, "width": 5, "count": 5, "infinity": true}\', 0) from stb;') + tdSql.checkRows(7); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":0, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":5, "count":4}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":15, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":15, "upper_bin":20, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":20, "upper_bin":25, "count":1}'); + tdSql.checkData(6, 0, '{"lower_bin":25, 
"upper_bin":inf, "count":1}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 0, "width": 5, "count": 5, "infinity": true}\', 0) from ctb;') + tdSql.checkRows(7); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":0, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":5, "count":4}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":15, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":15, "upper_bin":20, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":20, "upper_bin":25, "count":1}'); + tdSql.checkData(6, 0, '{"lower_bin":25, "upper_bin":inf, "count":1}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 0, "width": 5, "count": 5, "infinity": true}\', 0) from tb;') + tdSql.checkRows(7); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":0, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":5, "count":4}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":15, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":15, "upper_bin":20, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":20, "upper_bin":25, "count":1}'); + tdSql.checkData(6, 0, '{"lower_bin":25, "upper_bin":inf, "count":1}'); + + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 10, "width": -5, "count": 3, "infinity": true}\', 0) from stb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":10, "upper_bin":inf, "count":4}'); + tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":5, "count":4}'); + tdSql.checkData(3, 0, '{"lower_bin":-5, "upper_bin":0, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":-5, "count":1}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 10, "width": -5, "count": 3, "infinity": true}\', 0) from ctb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":10, "upper_bin":inf, "count":4}'); + tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":5, "count":4}'); + tdSql.checkData(3, 0, '{"lower_bin":-5, "upper_bin":0, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":-5, "count":1}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 10, "width": -5, "count": 3, "infinity": true}\', 0) from tb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":10, "upper_bin":inf, "count":4}'); + tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":5, "count":4}'); + tdSql.checkData(3, 0, '{"lower_bin":-5, "upper_bin":0, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":-5, "count":1}'); + + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -1.76e+308, "width": 5, "count": 1, "infinity": true}\', 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-1.76e+308, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":-1.76e+308, "upper_bin":-1.76e+308, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-1.76e+308, "upper_bin":inf, "count":15}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -1.76e+308, "width": 5, "count": 1, "infinity": true}\', 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-1.76e+308, 
"count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":-1.76e+308, "upper_bin":-1.76e+308, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-1.76e+308, "upper_bin":inf, "count":15}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -1.76e+308, "width": 5, "count": 1, "infinity": true}\', 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-1.76e+308, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":-1.76e+308, "upper_bin":-1.76e+308, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-1.76e+308, "upper_bin":inf, "count":15}'); + + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": false}\', 0) from stb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": false}\', 0) from tb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}'); + + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": true}\', 0) from stb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-7e+307, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}'); + tdSql.checkData(3, 0, '{"lower_bin":7e+307, "upper_bin":inf, "count":0}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": true}\', 0) from ctb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-7e+307, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}'); + tdSql.checkData(3, 0, '{"lower_bin":7e+307, "upper_bin":inf, "count":0}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": true}\', 0) from tb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-7e+307, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}'); + tdSql.checkData(3, 0, '{"lower_bin":7e+307, "upper_bin":inf, "count":0}'); + + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"width":2, "start": 0, "count": 4, "infinity": false}\', 0) from stb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"width":2, "start": 0, "count": 4, 
"infinity": false}\', 0) from ctb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"width":2, "start": 0, "count": 4, "infinity": false}\', 0) from tb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}'); + + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"width":2, "start": 0, "count": 4, "infinity": false}\', 0) from stb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"width":2, "start": 0, "count": 4, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"width":2, "start": 0, "count": 4, "infinity": false}\', 0) from tb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}'); + + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"count": 4, "width":2, "start": 0, "infinity": false}\', 0) from stb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"count": 4, "width":2, "start": 0, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"count": 4, "width":2, "start": 0, "infinity": false}\', 0) from tb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}'); + + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"infinity": false, "width":2, "start": 0, "count": 4}\', 0) from stb;') + tdSql.checkRows(4); + 
tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"infinity": false, "width":2, "start": 0, "count": 4}\', 0) from ctb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}'); + tdSql.query('select histogram(col_float, \'linear_bin\', \'{"infinity": false, "width":2, "start": 0, "count": 4}\', 0) from tb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}'); + + #ERROR CASE + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": true, "width": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": true, "width": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": true, "width": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": false, "width": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": false, "width": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": false, "width": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": "abc", "width": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": "abc", "width": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": "abc", "width": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": "中文", "width": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": "中文", "width": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": "中文", "width": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": abc, "width": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": abc, "width": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": abc, "width": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.80e+308, "width": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.80e+308, "width": 5, "count": 5, "infinity": false}\', 0) from ctb;') 
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.80e+308, "width": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1.80e+308, "width": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1.80e+308, "width": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1.80e+308, "width": 5, "count": 5, "infinity": false}\', 0) from tb;') + + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": true, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": true, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": true, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": false, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": false, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": false, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": "abc", "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": "abc", "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": "abc", "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": "中文", "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": "中文", "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": "中文", "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": abc, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": abc, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": abc, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 0, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 0, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 0, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": -1.80e+308, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": -1.80e+308, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": -1.80e+308, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select 
histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1.80e+308, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1.80e+308, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1.80e+308, "count": 5, "infinity": false}\', 0) from tb;') + + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.4e+308, "width": 1.4e+308, "count": 3, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.4e+308, "width": 1.4e+308, "count": 3, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.4e+308, "width": 1.4e+308, "count": 3, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.4e+308, "width": 1.4e+308, "count": 3, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.4e+308, "width": 1.4e+308, "count": 3, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.4e+308, "width": 1.4e+308, "count": 3, "infinity": true}\', 0) from tb;') + + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": -1, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": -1, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": -1, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 0, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 0, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 0, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1001, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1001, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1001, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": true, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": true, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": true, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": false, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": false, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": false, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": "abc", "infinity": true}\', 0) from stb;') + 
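+ # Editor's note (hedged): "count" must be a positive number no greater than
+ # 1000 (1001 is rejected above), and non-numeric values fail. The boundary
+ # cases near the top of this file suggest fractional counts are truncated,
+ # so 1.9999 yields a single bin, while the literal 1.99999999999999999 is
+ # not representable and already parses as exactly 2.0 in IEEE-754 doubles,
+ # hence two bins:
+ #   assert float("1.99999999999999999") == 2.0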
tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": "abc", "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": "abc", "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": "中文", "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": "中文", "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": "中文", "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": abc, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": abc, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": abc, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1.8e+308, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1.8e+308, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1.8e+308, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": -1.8e+308, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": -1.8e+308, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": -1.8e+308, "infinity": true}\', 0) from tb;') + + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": 1}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": 1}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": 1}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": 0}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": 0}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": 0}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": -1.5}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": -1.5}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": -1.5}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": 1.8e+308}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": 1.8e+308}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, 
"infinity": 1.8e+308}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": "abc"}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": "abc"}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": "abc"}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": "中文"}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": "中文"}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": "中文"}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": abc}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": abc}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": abc}\', 0) from tb;') + + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"begin": 0, "width": 1, "count": 1, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"begin": 0, "width": 1, "count": 1, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"begin": 0, "width": 1, "count": 1, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "factor": 1, "cnt": 1, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "factor": 1, "cnt": 1, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "factor": 1, "cnt": 1, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "inf": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "inf": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "inf": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{start: 0, width: 1, count: 1, infinity: true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{start: 0, width: 1, count: 1, infinity: true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{start: 0, width: 1, count: 1, infinity: true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'[ 0, 1, 1, true]\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'[ 0, 1, 1, true]\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'[ 0, 1, 1, true]\', 0) from tb;') + + 
## log_bin ## + #INTEGER + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 3, "count": 6, "infinity": false}\', 0) from stb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":9, "count":6}'); + tdSql.checkData(2, 0, '{"lower_bin":9, "upper_bin":27, "count":3}'); + tdSql.checkData(3, 0, '{"lower_bin":27, "upper_bin":81, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":81, "upper_bin":243, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":243, "upper_bin":729, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 3, "count": 6, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":9, "count":6}'); + tdSql.checkData(2, 0, '{"lower_bin":9, "upper_bin":27, "count":3}'); + tdSql.checkData(3, 0, '{"lower_bin":27, "upper_bin":81, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":81, "upper_bin":243, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":243, "upper_bin":729, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 3, "count": 6, "infinity": false}\', 0) from tb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":9, "count":6}'); + tdSql.checkData(2, 0, '{"lower_bin":9, "upper_bin":27, "count":3}'); + tdSql.checkData(3, 0, '{"lower_bin":27, "upper_bin":81, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":81, "upper_bin":243, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":243, "upper_bin":729, "count":0}'); + + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.0, "factor": 3.0, "count": 6, "infinity": false}\', 0) from stb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":-3, "upper_bin":-1, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":-9, "upper_bin":-3, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-27, "upper_bin":-9, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":-81, "upper_bin":-27, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":-243, "upper_bin":-81, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":-729, "upper_bin":-243, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.0, "factor": 3.0, "count": 6, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":-3, "upper_bin":-1, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":-9, "upper_bin":-3, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-27, "upper_bin":-9, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":-81, "upper_bin":-27, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":-243, "upper_bin":-81, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":-729, "upper_bin":-243, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.0, "factor": 3.0, "count": 6, "infinity": false}\', 0) from tb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":-3, "upper_bin":-1, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":-9, "upper_bin":-3, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-27, "upper_bin":-9, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":-81, "upper_bin":-27, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":-243, "upper_bin":-81, "count":0}'); + tdSql.checkData(5, 0, 
'{"lower_bin":-729, "upper_bin":-243, "count":0}'); + + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 10, "factor": 0.5, "count": 6, "infinity": false}\', 0) from stb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(1, 0, '{"lower_bin":2.5, "upper_bin":5, "count":3}'); + tdSql.checkData(2, 0, '{"lower_bin":1.25, "upper_bin":2.5, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":0.625, "upper_bin":1.25, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":0.3125, "upper_bin":0.625, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":0.15625, "upper_bin":0.3125, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 10, "factor": 0.5, "count": 6, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(1, 0, '{"lower_bin":2.5, "upper_bin":5, "count":3}'); + tdSql.checkData(2, 0, '{"lower_bin":1.25, "upper_bin":2.5, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":0.625, "upper_bin":1.25, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":0.3125, "upper_bin":0.625, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":0.15625, "upper_bin":0.3125, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 10, "factor": 0.5, "count": 6, "infinity": false}\', 0) from tb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(1, 0, '{"lower_bin":2.5, "upper_bin":5, "count":3}'); + tdSql.checkData(2, 0, '{"lower_bin":1.25, "upper_bin":2.5, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":0.625, "upper_bin":1.25, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":0.3125, "upper_bin":0.625, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":0.15625, "upper_bin":0.3125, "count":0}'); + + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": -10, "factor": 0.5, "count": 6, "infinity": false}\', 0) from stb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":-5, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":-5, "upper_bin":-2.5, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-2.5, "upper_bin":-1.25, "count":0}'); + tdSql.checkData(3, 0, '{"lower_bin":-1.25, "upper_bin":-0.625, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":-0.625, "upper_bin":-0.3125, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":-0.3125, "upper_bin":-0.15625, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": -10, "factor": 0.5, "count": 6, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":-5, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":-5, "upper_bin":-2.5, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-2.5, "upper_bin":-1.25, "count":0}'); + tdSql.checkData(3, 0, '{"lower_bin":-1.25, "upper_bin":-0.625, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":-0.625, "upper_bin":-0.3125, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":-0.3125, "upper_bin":-0.15625, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": -10, "factor": 0.5, "count": 6, "infinity": false}\', 0) from tb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":-5, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":-5, "upper_bin":-2.5, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-2.5, "upper_bin":-1.25, "count":0}'); + 
tdSql.checkData(3, 0, '{"lower_bin":-1.25, "upper_bin":-0.625, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":-0.625, "upper_bin":-0.3125, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":-0.3125, "upper_bin":-0.15625, "count":0}'); + + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 2, "factor": 1.5, "count": 6, "infinity": false}\', 0) from stb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":2, "upper_bin":3, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":4.5, "count":1}'); + tdSql.checkData(2, 0, '{"lower_bin":4.5, "upper_bin":6.75, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":6.75, "upper_bin":10.125, "count":4}'); + tdSql.checkData(4, 0, '{"lower_bin":10.125, "upper_bin":15.1875, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":15.1875, "upper_bin":22.7812, "count":1}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 2, "factor": 1.5, "count": 6, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":2, "upper_bin":3, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":4.5, "count":1}'); + tdSql.checkData(2, 0, '{"lower_bin":4.5, "upper_bin":6.75, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":6.75, "upper_bin":10.125, "count":4}'); + tdSql.checkData(4, 0, '{"lower_bin":10.125, "upper_bin":15.1875, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":15.1875, "upper_bin":22.7812, "count":1}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 2, "factor": 1.5, "count": 6, "infinity": false}\', 0) from tb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":2, "upper_bin":3, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":4.5, "count":1}'); + tdSql.checkData(2, 0, '{"lower_bin":4.5, "upper_bin":6.75, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":6.75, "upper_bin":10.125, "count":4}'); + tdSql.checkData(4, 0, '{"lower_bin":10.125, "upper_bin":15.1875, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":15.1875, "upper_bin":22.7812, "count":1}'); + + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 3.2, "factor": 0.5, "count": 1.9999, "infinity": false}\', 0) from stb;') + tdSql.checkRows(1); + tdSql.checkData(0, 0, '{"lower_bin":1.6, "upper_bin":3.2, "count":2}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 3.2, "factor": 0.5, "count": 1.9999, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(1); + tdSql.checkData(0, 0, '{"lower_bin":1.6, "upper_bin":3.2, "count":2}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 3.2, "factor": 0.5, "count": 1.9999, "infinity": false}\', 0) from tb;') + tdSql.checkRows(1); + tdSql.checkData(0, 0, '{"lower_bin":1.6, "upper_bin":3.2, "count":2}'); + + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 3.2, "factor": 0.5, "count": 1.99999999999999999, "infinity": false}\', 0) from stb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1.6, "upper_bin":3.2, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":0.8, "upper_bin":1.6, "count":1}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 3.2, "factor": 0.5, "count": 1.99999999999999999, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1.6, "upper_bin":3.2, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":0.8, "upper_bin":1.6, "count":1}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 3.2, "factor": 
0.5, "count": 1.99999999999999999, "infinity": false}\', 0) from tb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1.6, "upper_bin":3.2, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":0.8, "upper_bin":1.6, "count":1}'); + + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 5, "count": 3, "infinity": true}\', 0) from stb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":1, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":1, "upper_bin":5, "count":4}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":25, "count":7}'); + tdSql.checkData(3, 0, '{"lower_bin":25, "upper_bin":125, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":125, "upper_bin":inf, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 5, "count": 3, "infinity": true}\', 0) from ctb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":1, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":1, "upper_bin":5, "count":4}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":25, "count":7}'); + tdSql.checkData(3, 0, '{"lower_bin":25, "upper_bin":125, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":125, "upper_bin":inf, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 5, "count": 3, "infinity": true}\', 0) from tb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":1, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":1, "upper_bin":5, "count":4}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":25, "count":7}'); + tdSql.checkData(3, 0, '{"lower_bin":25, "upper_bin":125, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":125, "upper_bin":inf, "count":0}'); + + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 0.2e+308, "factor": 3.14, "count": 1, "infinity": true}\', 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":2e+307, "count":15}'); + tdSql.checkData(1, 0, '{"lower_bin":2e+307, "upper_bin":6.28e+307, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":6.28e+307, "upper_bin":inf, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 0.2e+308, "factor": 3.14, "count": 1, "infinity": true}\', 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":2e+307, "count":15}'); + tdSql.checkData(1, 0, '{"lower_bin":2e+307, "upper_bin":6.28e+307, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":6.28e+307, "upper_bin":inf, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 0.2e+308, "factor": 3.14, "count": 1, "infinity": true}\', 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":2e+307, "count":15}'); + tdSql.checkData(1, 0, '{"lower_bin":2e+307, "upper_bin":6.28e+307, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":6.28e+307, "upper_bin":inf, "count":0}'); + + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": -2, "factor": 3, "count": 3, "infinity": true}\', 0) from stb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":-2, "upper_bin":inf, "count":14}'); + tdSql.checkData(1, 0, '{"lower_bin":-6, "upper_bin":-2, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-18, "upper_bin":-6, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":-54, "upper_bin":-18, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":-54, "count":0}'); + 
tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": -2, "factor": 3, "count": 3, "infinity": true}\', 0) from ctb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":-2, "upper_bin":inf, "count":14}'); + tdSql.checkData(1, 0, '{"lower_bin":-6, "upper_bin":-2, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-18, "upper_bin":-6, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":-54, "upper_bin":-18, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":-54, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": -2, "factor": 3, "count": 3, "infinity": true}\', 0) from tb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":-2, "upper_bin":inf, "count":14}'); + tdSql.checkData(1, 0, '{"lower_bin":-6, "upper_bin":-2, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-18, "upper_bin":-6, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":-54, "upper_bin":-18, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":-54, "count":0}'); + + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 10, "factor": 0.5, "count": 3, "infinity": true}\', 0) from stb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":10, "upper_bin":inf, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(2, 0, '{"lower_bin":2.5, "upper_bin":5, "count":3}'); + tdSql.checkData(3, 0, '{"lower_bin":1.25, "upper_bin":2.5, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":1.25, "count":3}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 10, "factor": 0.5, "count": 3, "infinity": true}\', 0) from ctb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":10, "upper_bin":inf, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(2, 0, '{"lower_bin":2.5, "upper_bin":5, "count":3}'); + tdSql.checkData(3, 0, '{"lower_bin":1.25, "upper_bin":2.5, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":1.25, "count":3}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 10, "factor": 0.5, "count": 3, "infinity": true}\', 0) from tb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":10, "upper_bin":inf, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(2, 0, '{"lower_bin":2.5, "upper_bin":5, "count":3}'); + tdSql.checkData(3, 0, '{"lower_bin":1.25, "upper_bin":2.5, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":1.25, "count":3}'); + + #FLOAT + tdSql.query('select histogram(col_float, \'log_bin\', \'{"factor":2, "start": 1, "count": 4, "infinity": false}\', 0) from stb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":8, "count":4}'); + tdSql.checkData(3, 0, '{"lower_bin":8, "upper_bin":16, "count":4}'); + tdSql.query('select histogram(col_float, \'log_bin\', \'{"factor":2, "start": 1, "count": 4, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":8, "count":4}'); + tdSql.checkData(3, 0, '{"lower_bin":8, "upper_bin":16, "count":4}'); + tdSql.query('select histogram(col_float, 
\'log_bin\', \'{"factor":2, "start": 1, "count": 4, "infinity": false}\', 0) from tb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":8, "count":4}'); + tdSql.checkData(3, 0, '{"lower_bin":8, "upper_bin":16, "count":4}'); + + tdSql.query('select histogram(col_float, \'log_bin\', \'{"count": 4, "factor":2, "start": 1, "infinity": false}\', 0) from stb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":8, "count":4}'); + tdSql.checkData(3, 0, '{"lower_bin":8, "upper_bin":16, "count":4}'); + tdSql.query('select histogram(col_float, \'log_bin\', \'{"count": 4, "factor":2, "start": 1, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":8, "count":4}'); + tdSql.checkData(3, 0, '{"lower_bin":8, "upper_bin":16, "count":4}'); + tdSql.query('select histogram(col_float, \'log_bin\', \'{"count": 4, "factor":2, "start": 1, "infinity": false}\', 0) from tb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":8, "count":4}'); + tdSql.checkData(3, 0, '{"lower_bin":8, "upper_bin":16, "count":4}'); + + tdSql.query('select histogram(col_float, \'log_bin\', \'{"infinity": false, "count": 4, "factor":2, "start": 1}\', 0) from stb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":8, "count":4}'); + tdSql.checkData(3, 0, '{"lower_bin":8, "upper_bin":16, "count":4}'); + tdSql.query('select histogram(col_float, \'log_bin\', \'{"infinity": false, "count": 4, "factor":2, "start": 1}\', 0) from ctb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":8, "count":4}'); + tdSql.checkData(3, 0, '{"lower_bin":8, "upper_bin":16, "count":4}'); + tdSql.query('select histogram(col_float, \'log_bin\', \'{"infinity": false, "count": 4, "factor":2, "start": 1}\', 0) from tb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":8, "count":4}'); + tdSql.checkData(3, 0, '{"lower_bin":8, "upper_bin":16, "count":4}'); + + #ERROR CASE + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": true, "factor": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": true, "factor": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": true, "factor": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": false, "factor": 5, "count": 5, "infinity": 
false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": false, "factor": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": false, "factor": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": "abc", "factor": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": "abc", "factor": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": "abc", "factor": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": "中文", "factor": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": "中文", "factor": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": "中文", "factor": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": abc, "factor": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": abc, "factor": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": abc, "factor": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.80e+308, "factor": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.80e+308, "factor": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.80e+308, "factor": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1.80e+308, "factor": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1.80e+308, "factor": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1.80e+308, "factor": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 5, "count": 5, "infinity": false}\', 0) from tb;') + + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": true, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": true, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": true, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": false, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": false, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', 
\'{"start": 1, "factor": false, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": "abc", "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": "abc", "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": "abc", "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": "中文", "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": "中文", "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": "中文", "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": abc, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": abc, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": abc, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 1.80e+308, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 1.80e+308, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 1.80e+308, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 0, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 0, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 0, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": -5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": -5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": -5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 1, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 1, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 1, "count": 5, "infinity": false}\', 0) from tb;') + + #out of range + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.4e+308, "factor": 1.5, "count": 3, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.4e+308, "factor": 1.5, "count": 3, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.4e+308, "factor": 1.5, "count": 3, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.4e+308, "factor": 1.5, "count": 3, "infinity": true}\', 0) from 
stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.4e+308, "factor": 1.5, "count": 3, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.4e+308, "factor": 1.5, "count": 3, "infinity": true}\', 0) from tb;') + + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": -1, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": -1, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": -1, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 0, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 0, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 0, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1001, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1001, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1001, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": true, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": true, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": true, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": false, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": false, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": false, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": "abc", "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": "abc", "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": "abc", "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": "中文", "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": "中文", "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": "中文", "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": abc, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": abc, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": abc, "infinity": true}\', 
0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1.8e+308, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1.8e+308, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1.8e+308, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": -1.8e+308, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": -1.8e+308, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": -1.8e+308, "infinity": true}\', 0) from tb;') + + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": 1}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": 1}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": 1}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": 0}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": 0}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": 0}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": -1.5}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": -1.5}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": -1.5}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": 1.8e+308}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": 1.8e+308}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": 1.8e+308}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": "abc"}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": "abc"}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": "abc"}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": "中文"}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": "中文"}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": "中文"}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": abc}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": abc}\', 0) from ctb;') + tdSql.error('select 
histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": abc}\', 0) from tb;') + + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"begin": 0, "factor": 1, "count": 1, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"begin": 0, "factor": 1, "count": 1, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"begin": 0, "factor": 1, "count": 1, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "cnt": 1, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "cnt": 1, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "cnt": 1, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "inf": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "inf": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "inf": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{start: 0, factor: 1, count: 1, infinity: true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{start: 0, factor: 1, count: 1, infinity: true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{start: 0, factor: 1, count: 1, infinity: true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'[ 0, 1, 1, true]\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'[ 0, 1, 1, true]\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'[ 0, 1, 1, true]\', 0) from tb;') + + print("============== STEP 3: normalization ================== ") + ## Normalization ## + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5,7]", 1) from ctb;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0.333333}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":0.333333}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":0.333333}'); + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5,7]", 1) from tb;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0.333333}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":0.333333}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":0.333333}'); + + tdSql.query('select histogram(col_int, "user_input", "[1,5,10]", 0) from stb;') + tdSql.checkRows(2) + 
tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":5, "count":4}'); + tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.query('select histogram(col_int, "user_input", "[1,5,10]", 1) from ctb;') + tdSql.checkRows(2) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":5, "count":0.444444}'); + tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":0.555556}'); + tdSql.query('select histogram(col_int, "user_input", "[1,5,10]", 1) from tb;') + tdSql.checkRows(2) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":5, "count":0.444444}'); + tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":0.555556}'); + + tdSql.query('select histogram(col_double, "user_input", "[0,5,11]", 0) from stb;') + tdSql.checkRows(2) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":5, "count":4}'); + tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":11, "count":6}'); + tdSql.query('select histogram(col_double, "user_input", "[0,5,11]", 1) from ctb;') + tdSql.checkRows(2) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":5, "count":0.400000}'); + tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":11, "count":0.600000}'); + tdSql.query('select histogram(col_double, "user_input", "[0,5,11]", 1) from tb;') + tdSql.checkRows(2) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":5, "count":0.400000}'); + tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":11, "count":0.600000}'); + + tdSql.query('select histogram(col_bigint, \'linear_bin\', \'{"start": 1, "width": 5, "count": 2, "infinity": false}\', 0) from stb;') + tdSql.checkRows(2) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":6, "count":5}'); + tdSql.checkData(1, 0, '{"lower_bin":6, "upper_bin":11, "count":4}'); + tdSql.query('select histogram(col_bigint, \'linear_bin\', \'{"start": 1, "width": 5, "count": 2, "infinity": false}\', 1) from ctb;') + tdSql.checkRows(2) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":6, "count":0.555556}'); + tdSql.checkData(1, 0, '{"lower_bin":6, "upper_bin":11, "count":0.444444}'); + tdSql.query('select histogram(col_bigint, \'linear_bin\', \'{"start": 1, "width": 5, "count": 2, "infinity": false}\', 1) from tb;') + tdSql.checkRows(2) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":6, "count":0.555556}'); + tdSql.checkData(1, 0, '{"lower_bin":6, "upper_bin":11, "count":0.444444}'); + + tdSql.query('select histogram(col_int, \'linear_bin\', \'{"start": -10, "width": 5, "count": 3, "infinity": true}\', 0) from stb;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-10, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":-10, "upper_bin":-5, "count":1}'); + tdSql.checkData(2, 0, '{"lower_bin":-5, "upper_bin":0, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":0, "upper_bin":5, "count":5}'); + tdSql.checkData(4, 0, '{"lower_bin":5, "upper_bin":inf, "count":8}'); + tdSql.query('select histogram(col_int, \'linear_bin\', \'{"start": -10, "width": 5, "count": 3, "infinity": true}\', 1) from ctb;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-10, "count":0.000000}'); + tdSql.checkData(1, 0, '{"lower_bin":-10, "upper_bin":-5, "count":0.066667}'); + tdSql.checkData(2, 0, '{"lower_bin":-5, "upper_bin":0, "count":0.066667}'); + tdSql.checkData(3, 0, '{"lower_bin":0, "upper_bin":5, "count":0.333333}'); + tdSql.checkData(4, 0, '{"lower_bin":5, "upper_bin":inf, "count":0.533333}'); + tdSql.query('select histogram(col_int, \'linear_bin\', \'{"start": -10, "width": 5, "count": 3, "infinity": true}\', 1) from tb;') + 
tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-10, "count":0.000000}'); + tdSql.checkData(1, 0, '{"lower_bin":-10, "upper_bin":-5, "count":0.066667}'); + tdSql.checkData(2, 0, '{"lower_bin":-5, "upper_bin":0, "count":0.066667}'); + tdSql.checkData(3, 0, '{"lower_bin":0, "upper_bin":5, "count":0.333333}'); + tdSql.checkData(4, 0, '{"lower_bin":5, "upper_bin":inf, "count":0.533333}'); + + tdSql.query('select histogram(col_float, \'log_bin\', \'{"start": 1, "factor": 5, "count": 3, "infinity": false}\', 0) from stb;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":5, "count":4}'); + tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":25, "count":8}'); + tdSql.checkData(2, 0, '{"lower_bin":25, "upper_bin":125, "count":1}'); + tdSql.query('select histogram(col_float, \'log_bin\', \'{"start": 1, "factor": 5, "count": 3, "infinity": false}\', 1) from ctb;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":5, "count":0.307692}'); + tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":25, "count":0.615385}'); + tdSql.checkData(2, 0, '{"lower_bin":25, "upper_bin":125, "count":0.076923}'); + tdSql.query('select histogram(col_float, \'log_bin\', \'{"start": 1, "factor": 5, "count": 3, "infinity": false}\', 1) from tb;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":5, "count":0.307692}'); + tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":25, "count":0.615385}'); + tdSql.checkData(2, 0, '{"lower_bin":25, "upper_bin":125, "count":0.076923}'); + + tdSql.query('select histogram(col_double, \'log_bin\', \'{"start": -0.5, "factor": 0.5, "count": 2, "infinity": false}\', 0) from stb;') + tdSql.checkRows(2) + tdSql.checkData(0, 0, '{"lower_bin":-0.5, "upper_bin":-0.25, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":-0.25, "upper_bin":-0.125, "count":0}'); + tdSql.query('select histogram(col_double, \'log_bin\', \'{"start": -0.5, "factor": 0.5, "count": 2, "infinity": false}\', 1) from ctb;') + tdSql.checkRows(2) + tdSql.checkData(0, 0, '{"lower_bin":-0.5, "upper_bin":-0.25, "count":0.000000}'); + tdSql.checkData(1, 0, '{"lower_bin":-0.25, "upper_bin":-0.125, "count":0.000000}'); + tdSql.query('select histogram(col_double, \'log_bin\', \'{"start": -0.5, "factor": 0.5, "count": 2, "infinity": false}\', 1) from tb;') + tdSql.checkRows(2) + tdSql.checkData(0, 0, '{"lower_bin":-0.5, "upper_bin":-0.25, "count":0.000000}'); + tdSql.checkData(1, 0, '{"lower_bin":-0.25, "upper_bin":-0.125, "count":0.000000}'); + + tdSql.query('select histogram(col_double, \'log_bin\', \'{"start": -0.5, "factor": 0.5, "count": 2, "infinity": true}\', 0) from stb;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-0.5, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":-0.5, "upper_bin":-0.25, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-0.25, "upper_bin":-0.125, "count":0}'); + tdSql.checkData(3, 0, '{"lower_bin":-0.125, "upper_bin":inf, "count":13}'); + tdSql.query('select histogram(col_double, \'log_bin\', \'{"start": -0.5, "factor": 0.5, "count": 2, "infinity": true}\', 1) from ctb;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-0.5, "count":0.133333}'); + tdSql.checkData(1, 0, '{"lower_bin":-0.5, "upper_bin":-0.25, "count":0.000000}'); + tdSql.checkData(2, 0, '{"lower_bin":-0.25, "upper_bin":-0.125, "count":0.000000}'); + tdSql.checkData(3, 0, '{"lower_bin":-0.125, "upper_bin":inf, "count":0.866667}'); + tdSql.query('select histogram(col_double, 
\'log_bin\', \'{"start": -0.5, "factor": 0.5, "count": 2, "infinity": true}\', 1) from tb;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-0.5, "count":0.133333}'); + tdSql.checkData(1, 0, '{"lower_bin":-0.5, "upper_bin":-0.25, "count":0.000000}'); + tdSql.checkData(2, 0, '{"lower_bin":-0.25, "upper_bin":-0.125, "count":0.000000}'); + tdSql.checkData(3, 0, '{"lower_bin":-0.125, "upper_bin":inf, "count":0.866667}'); + + #ERROR CASE + tdSql.error('select histogram(col_smallint, "user_input", "[1,3,5,7]", -10) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 2) from ctb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 3.14) from tb;') + + tdSql.error('select histogram(col_bigint, \'linear_bin\', \'{"start": 1, "width": 5, "count": 2, "infinity": false}\', true) from stb;') + tdSql.error('select histogram(col_bigint, \'linear_bin\', \'{"start": 1, "width": 5, "count": 2, "infinity": false}\', false) from ctb;') + + tdSql.error('select histogram(col_double, \'log_bin\', \'{"start": -0.5, "factor": 0.5, "count": 2, "infinity": true}\', "abc") from tb;') + tdSql.error('select histogram(col_double, \'log_bin\', \'{"start": -0.5, "factor": 0.5, "count": 2, "infinity": true}\', abc) from tb;') + + print("============== STEP 4: combinations ================== ") + ## Combinations ## + #select distinct func(col_name) + tdSql.error('select distinct histogram(col_tinyint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select distinct histogram(col_smallint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select distinct histogram(col_int, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select distinct histogram(col_bigint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select distinct histogram(col_float, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select distinct histogram(col_double, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select distinct histogram(col_bool, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select distinct histogram(col_timestamp, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select distinct histogram(col_nchar, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select distinct histogram(col_binary, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select distinct histogram(tag_tinyint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select distinct histogram(tag_smallint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select distinct histogram(tag_int, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select distinct histogram(tag_bigint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select distinct histogram(tag_float, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select distinct histogram(tag_double, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select distinct histogram(tag_bool, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select distinct histogram(tag_timestamp, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select distinct histogram(tag_nchar, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select distinct histogram(tag_binary, "user_input", "[1,3,5,7]", 0) from stb;') + + tdSql.error('select histogram(*, "user_input", "[1,3,5,7]", 0) from stb;') + + #select func(col_name arith_oper xxx) + tdSql.error('select histogram(col_int + 1, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select 
histogram(col_int - 1, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int * 2.0, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int / 2.0, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int % 2.0, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_timestamp + now, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int + col_bigint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int - col_bigint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int * col_bigint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int / col_bigint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int % col_bigint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int + pow(1,2), "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int - abs(-100), "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int * round(col_float), "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int / ceil(1.5), "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int % floor(col_double), "user_input", "[1,3,5,7]", 0) from stb;') + + #select func() arith_oper xxx + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) + 1 from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) - 1 from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) * 1 from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) / 1 from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) % 1 from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) + col_double from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) - col_double from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) * col_double from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) / col_double from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) % col_double from stb;') + + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) + abs(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) - ceil(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) * floor(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) / round(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) % acos(col_double) from stb;') + + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) + max(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) - min(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) * first(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) / last(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) % top(col_double, 1) from stb;') + + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) + sum(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) - 
avg(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) * count(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) / stddev(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) % twa(col_double) from stb;') + + #select func(),xxx + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),col_tinyint from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),col_smallint from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),col_int from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),col_bigint from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),col_timestamp from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),col_bool from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),col_float from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),col_double from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),col_binary from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),col_nchar from stb;') + + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_tinyint from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_smallint from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_int from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_bigint from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_timestamp from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_bool from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_float from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_double from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_binary from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_nchar from stb;') + + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),ts from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tbname from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),_c0 from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),_C0 from stb;') + + tdSql.error('select
histogram(col_int, "user_input", "[1,3,5,7]", 0),abs(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),ceil(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),floor(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),round(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),acos(col_double) from stb;') + + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),max(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),min(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),first(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),last(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),top(col_double, 1) from stb;') + + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),sum(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),avg(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),count(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),stddev(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),twa(col_double) from stb;') + + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),histogram(col_int, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),histogram(col_int, "linear_bin", \'{"start": -1, "width":5, "count":5, "infinity":false}\', 0) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),histogram(col_int, "log_bin", \'{"start": 10, "factor":0.5, "count":5, "infinity":false}\', 0) from stb;') + + #select where condition + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7,9,15]", 0) from tb;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7,9,15]", 0) from tb where col_int > 3;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7,9,15]", 0) from tb where col_int < 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7,9,15]", 0) from tb where col_int >= 3;') + tdSql.checkRows(5) + 
tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7,9,15]", 0) from tb where col_int <= 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7,9,15]", 0) from tb where col_int = 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7,9,15]", 0) from tb where col_int != 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7,9,15]", 0) from tb where col_int <> 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7,9,15]", 0) from tb where col_int > 5 and col_int <7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7,9,15]", 0) from tb where col_int >= 5 and col_int <=7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":1}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7,9,15]", 0) from tb where col_int between 5 and 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":1}'); + tdSql.checkData(2, 0, 
'{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb where col_tinyint > 3;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb where col_tinyint < 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb where col_tinyint >= 3;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb where col_tinyint <= 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb where col_tinyint = 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb where col_tinyint != 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, 
"upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb where col_tinyint <> 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb where col_tinyint > 5 and col_tinyint <7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb where col_tinyint >= 5 and col_tinyint <=7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":1}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb where col_tinyint between 5 and 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":1}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where col_bigint > 3;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where col_bigint < 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where 
col_bigint >= 3;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where col_bigint <= 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where col_bigint = 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where col_bigint != 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where col_bigint <> 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where col_bigint > 5 and col_bigint <7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where col_bigint >= 5 and col_bigint <=7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":1}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where col_bigint between 5 and 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}'); + 
tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":1}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where tag_bigint > 0;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where tag_bigint < 2;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where tag_bigint >= 1;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where tag_bigint <= 1;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where tag_bigint = 1;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where tag_bigint != 2;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, 
"upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where tag_bigint <> 2;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where tag_bigint > 0 and tag_bigint < 2;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where tag_bigint >= 1 and tag_bigint <= 1;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where tag_bigint between 0 and 2;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + #select session + tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb session (col_timestamp, 1w);') + tdSql.checkRows(1) + tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb session (col_timestamp, 1d);') + tdSql.checkRows(1) + tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb session (col_timestamp, 1h);') + tdSql.checkRows(1) + tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb session (col_timestamp, 1m);') + tdSql.checkRows(1) + tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}'); + + #tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb session (col_timestamp, 1s);') + #tdSql.checkRows(16) + #tdSql.checkData(0, 1, "(0:10]:0"); + #tdSql.checkData(1, 1, "(0:10]:0"); + #tdSql.checkData(2, 1, "(0:10]:1"); + #tdSql.checkData(3, 1, "(0:10]:1"); + #tdSql.checkData(4, 1, "(0:10]:1"); + #tdSql.checkData(5, 1, "(0:10]:1"); + #tdSql.checkData(6, 1, "(0:10]:1"); + #tdSql.checkData(7, 1, "(0:10]:1"); + #tdSql.checkData(8, 1, "(0:10]:1"); + #tdSql.checkData(9, 1, "(0:10]:1"); + #tdSql.checkData(10, 1, "(0:10]:1"); + #tdSql.checkData(11, 1, "(0:10]:1"); + #tdSql.checkData(12, 1, "(0:10]:0"); + 
#tdSql.checkData(13, 1, "(0:10]:0"); + #tdSql.checkData(14, 1, "(0:10]:0"); + #tdSql.checkData(15, 1, "(0:10]:0"); + + #tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb session (col_timestamp, 1a);') + #tdSql.checkRows(16) + #tdSql.checkData(0, 1, "(0:10]:0"); + #tdSql.checkData(1, 1, "(0:10]:0"); + #tdSql.checkData(2, 1, "(0:10]:1"); + #tdSql.checkData(3, 1, "(0:10]:1"); + #tdSql.checkData(4, 1, "(0:10]:1"); + #tdSql.checkData(5, 1, "(0:10]:1"); + #tdSql.checkData(6, 1, "(0:10]:1"); + #tdSql.checkData(7, 1, "(0:10]:1"); + #tdSql.checkData(8, 1, "(0:10]:1"); + #tdSql.checkData(9, 1, "(0:10]:1"); + #tdSql.checkData(10, 1, "(0:10]:1"); + #tdSql.checkData(11, 1, "(0:10]:1"); + #tdSql.checkData(12, 1, "(0:10]:0"); + #tdSql.checkData(13, 1, "(0:10]:0"); + #tdSql.checkData(14, 1, "(0:10]:0"); + #tdSql.checkData(15, 1, "(0:10]:0"); + + #select state_window + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb state_window(col_timestamp);') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb state_window(col_tinyint);') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb state_window(col_smallint);') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb state_window(col_int);') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb state_window(col_bigint);') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb state_window(col_bool);') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb state_window(col_float);') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb state_window(col_double);') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb state_window(col_binary);') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb state_window(col_nchar);') + + #select interval/sliding/fill + tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb interval(1y);') + tdSql.checkRows(1) + tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb interval(1n);') + tdSql.checkRows(1) + tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb interval(1w);') + tdSql.checkRows(1) + tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb interval(1d);') + tdSql.checkRows(1) + tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb interval(1h);') + tdSql.checkRows(1) + tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb interval(1s);') + tdSql.checkRows(16) + tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(1, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(2, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(3, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(4, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(5, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(6, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(7, 1, '{"lower_bin":0, 
"upper_bin":10, "count":1}'); + tdSql.checkData(8, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(9, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(10, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(11, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(12, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(13, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(14, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(15, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb interval(1a);') + tdSql.checkRows(16) + tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(1, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(2, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(3, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(4, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(5, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(6, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(7, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(8, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(9, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(10, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(11, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(12, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(13, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(14, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(15, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb interval(1w) sliding(1w);') + tdSql.checkRows(1) + tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb interval(1d) sliding(1d);') + tdSql.checkRows(1) + tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb interval(1h) sliding(1h);') + tdSql.checkRows(1) + tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb interval(1s) sliding(1s);') + tdSql.checkRows(16) + tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(1, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(2, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(3, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(4, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(5, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(6, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(7, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(8, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(9, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(10, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(11, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(12, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(13, 1, '{"lower_bin":0, 
"upper_bin":10, "count":0}'); + tdSql.checkData(14, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(15, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb where col_timestamp > now - 1w and col_timestamp < now + 1w interval(1w) fill(NULL);') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb where col_timestamp > now - 1d and col_timestamp < now + 1d interval(1d) fill(None);') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb where col_timestamp > now - 1h and col_timestamp < now + 1h interval(1h) fill(Prev);') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb where col_timestamp > now - 1m and col_timestamp < now + 1m interval(1m) fill(Next);') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb where col_timestamp > now - 1s and col_timestamp < now + 1s interval(1s) fill(Linear);') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb where col_timestamp > now - 1a and col_timestamp < now + 1a interval(1a) fill(Value, 1);') + + #select group by + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by col_tinyint;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by col_smallint;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by col_int;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by col_bigint;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by col_bool;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by col_float;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by col_double;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by col_binary;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by col_nchar;') + + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_tinyint;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_smallint;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_int;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_bigint;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_bool;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_float;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_double;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_binary;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_nchar;') + + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tbname;') + + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_tinyint,col_tinyint;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_smallint,col_smallint;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_int,col_int;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_bigint,col_bigint;') + tdSql.error('select 
histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_bool,col_bool;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_float,col_float;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_double,col_double;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_binary,col_binary;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_nchar,col_nchar;') + + #select order by + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_timestamp;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_timestamp desc;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_tinyint;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_tinyint desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_smallint;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_smallint desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_int;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_int desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_bigint;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_bigint desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_bool;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_bool desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_float;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_float desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_double;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_double desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_double;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_double desc;') + + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_timestamp;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_timestamp desc;') + tdSql.error('select histogram(col_int, 
"user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_tinyint;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_tinyint desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_smallint;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_smallint desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_int;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_int desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_bigint;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_bigint desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_bool;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_bool desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_float;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_float desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_double;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_double desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_double;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_double desc;') + + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tbname;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tbname desc;') + + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_timestamp,col_timestamp;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_timestamp,col_timestamp desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_tinyint,col_timestamp;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_tinyint,col_timestamp desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_smallint,col_timestamp;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_smallint,col_timestamp desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_int,col_timestamp;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_int,col_timestamp desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_bigint,col_timestamp;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_bigint,col_timestamp desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_bool,col_timestamp;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_bool,col_timestamp desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_float,col_timestamp;') 
+ tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_float,col_timestamp desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_double,col_timestamp;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_double,col_timestamp desc;') + + #select limit/offset + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb limit 3;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from ctb limit 3;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb limit 3;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from ctb limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb limit 2,3;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from ctb limit 2,3;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb limit 2,3;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":7, "upper_bin":9, 
"count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + #nested query + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from (select * from stb);') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from (select * from ctb);') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from (select * from tb);') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + tdSql.query('select histogram(val, "user_input", "[0,3,5,7,9,15]", 0) from (select col_int as val from stb);') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(val, "user_input", "[0,3,5,7,9,15]", 0) from (select col_int as val from ctb);') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(val, "user_input", "[0,3,5,7,9,15]", 0) from (select col_int as val from tb);') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + tdSql.query('select * from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb);') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select * from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) 
from ctb)') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select * from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb)') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb);') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from ctb)') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb)') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + tdSql.query('select first(_c0) from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb);') + tdSql.checkRows(1) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.query('select first(_c0) from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from ctb)') + tdSql.checkRows(1) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.query('select first(_c0) from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb)') + tdSql.checkRows(1) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + + tdSql.query('select last(_c0) from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb);') + tdSql.checkRows(1) + tdSql.checkData(0, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select last(_c0) from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from ctb)') + tdSql.checkRows(1) + tdSql.checkData(0, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select last(_c0) from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb)') + tdSql.checkRows(1) + tdSql.checkData(0, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + + tdSql.query('select _c0 from 
(select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb limit 3);') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from ctb limit 3)') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb limit 3)') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb) limit 3;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from ctb) limit 3') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb) limit 3') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_timestamp);') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from ctb order by col_timestamp)') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb order by col_timestamp)') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + #join + 
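+ # the two table sets created below (stb1/ctb1/tb1 and stb2/ctb2/tb2) receive identical rows, so joining them on col_timestamp reproduces the single-table data and the histogram results should match the single-table cases above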
tdSql.execute("create stable stb1 (col_timestamp timestamp, col_tinyint tinyint, col_smallint smallint, col_int int, col_bigint bigint, col_float float, col_double double, col_bool bool, col_binary binary(10), col_nchar nchar(10)) \ + tags(tag_timestamp timestamp, tag_tinyint tinyint, tag_smallint smallint, tag_int int, tag_bigint bigint, tag_float float, tag_double double, tag_bool bool, tag_binary binary(10), tag_nchar nchar(10));") + tdSql.execute("create table ctb1 using stb1 tags (now, 1, 1, 1, 1, 1.0, 1.0, true, 'abc', 'abc');") + tdSql.execute("create table tb1 (col_timestamp timestamp, col_tinyint tinyint, col_smallint smallint, col_int int, col_bigint bigint, col_float float, col_double double, col_bool bool, col_binary binary(10), col_nchar nchar(10));") + + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:00', -9, -9, -9, -9, -9.5, -9.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:01', -1, -1, -1, -1, -1.5, -1.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:02', 1, 1, 1, 1, 1.5, 1.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:03', 2, 2, 2, 2, 2.5, 2.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:04', 3, 3, 3, 3, 3.5, 3.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:05', 4, 4, 4, 4, 4.5, 4.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:06', 5, 5, 5, 5, 5.5, 5.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:07', 6, 6, 6, 6, 6.5, 6.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:08', 7, 7, 7, 7, 7.5, 7.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:09', 8, 8, 8, 8, 8.5, 8.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:10', 9, 9, 9, 9, 9.5, 9.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:11', 10, 10, 10, 10, 10.5, 10.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:12', 15, 15, 15, 15, 15.5, 15.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:13', 20, 20, 20, 20, 20.5, 20.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:14', 99, 99, 99, 99, 99.5, 99.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:15', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);") + + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:00', -9, -9, -9, -9, -9.5, -9.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:01', -1, -1, -1, -1, -1.5, -1.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:02', 1, 1, 1, 1, 1.5, 1.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:03', 2, 2, 2, 2, 2.5, 2.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:04', 3, 3, 3, 3, 3.5, 3.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:05', 4, 4, 4, 4, 4.5, 4.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:06', 5, 5, 5, 5, 5.5, 5.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:07', 6, 6, 6, 6, 6.5, 6.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:08', 7, 7, 7, 7, 
7.5, 7.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:09', 8, 8, 8, 8, 8.5, 8.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:10', 9, 9, 9, 9, 9.5, 9.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:11', 10, 10, 10, 10, 10.5, 10.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:12', 15, 15, 15, 15, 15.5, 15.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:13', 20, 20, 20, 20, 20.5, 20.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:14', 99, 99, 99, 99, 99.5, 99.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:15', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);") + + tdSql.execute("create stable stb2 (col_timestamp timestamp, col_tinyint tinyint, col_smallint smallint, col_int int, col_bigint bigint, col_float float, col_double double, col_bool bool, col_binary binary(10), col_nchar nchar(10)) \ + tags(tag_timestamp timestamp, tag_tinyint tinyint, tag_smallint smallint, tag_int int, tag_bigint bigint, tag_float float, tag_double double, tag_bool bool, tag_binary binary(10), tag_nchar nchar(10));") + tdSql.execute("create table ctb2 using stb2 tags (now, 1, 1, 1, 1, 1.0, 1.0, true, 'abc', 'abc');") + tdSql.execute("create table tb2 (col_timestamp timestamp, col_tinyint tinyint, col_smallint smallint, col_int int, col_bigint bigint, col_float float, col_double double, col_bool bool, col_binary binary(10), col_nchar nchar(10));") + + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:00', -9, -9, -9, -9, -9.5, -9.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:01', -1, -1, -1, -1, -1.5, -1.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:02', 1, 1, 1, 1, 1.5, 1.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:03', 2, 2, 2, 2, 2.5, 2.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:04', 3, 3, 3, 3, 3.5, 3.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:05', 4, 4, 4, 4, 4.5, 4.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:06', 5, 5, 5, 5, 5.5, 5.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:07', 6, 6, 6, 6, 6.5, 6.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:08', 7, 7, 7, 7, 7.5, 7.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:09', 8, 8, 8, 8, 8.5, 8.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:10', 9, 9, 9, 9, 9.5, 9.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:11', 10, 10, 10, 10, 10.5, 10.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:12', 15, 15, 15, 15, 15.5, 15.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:13', 20, 20, 20, 20, 20.5, 20.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:14', 99, 99, 99, 99, 99.5, 99.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:15', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);") + + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:00', -9, -9, -9, -9, -9.5, -9.5, true, 'abc', 'abc');") + 
tdSql.execute("insert into tb2 values ('2022-01-01 00:00:01', -1, -1, -1, -1, -1.5, -1.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:02', 1, 1, 1, 1, 1.5, 1.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:03', 2, 2, 2, 2, 2.5, 2.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:04', 3, 3, 3, 3, 3.5, 3.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:05', 4, 4, 4, 4, 4.5, 4.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:06', 5, 5, 5, 5, 5.5, 5.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:07', 6, 6, 6, 6, 6.5, 6.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:08', 7, 7, 7, 7, 7.5, 7.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:09', 8, 8, 8, 8, 8.5, 8.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:10', 9, 9, 9, 9, 9.5, 9.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:11', 10, 10, 10, 10, 10.5, 10.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:12', 15, 15, 15, 15, 15.5, 15.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:13', 20, 20, 20, 20, 20.5, 20.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:14', 99, 99, 99, 99, 99.5, 99.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:15', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);") + + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb1, tb2 where tb1.col_timestamp = tb2.col_timestamp;'); + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from ctb1, ctb2 where ctb1.col_timestamp = ctb2.col_timestamp;'); + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + #stable join will cause crash + #tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb1, stb2 where stb1.col_timestamp = stb2.col_timestamp and stb1.tag_int = stb2.tag_int;'); + #tdSql.checkRows(5) + #tdSql.checkData(0, 0, "(0:3]:3"); + #tdSql.checkData(1, 0, "(3:5]:2"); + #tdSql.checkData(2, 0, "(5:7]:2"); + #tdSql.checkData(3, 0, "(7:9]:2"); + #tdSql.checkData(4, 0, "(9:15]:2"); + + #union all + tdSql.query('select histogram(col_int, \'user_input\', \'[1,3,5]\', 0) from tb1 union all select histogram(col_int, \'user_input\', \'[1,3,5]\', 0) from tb2;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + 
tdSql.checkData(3, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_int, \'user_input\', \'[1,3,5]\', 0) from ctb1 union all select histogram(col_int, \'user_input\', \'[1,3,5]\', 0) from ctb2;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_int, \'user_input\', \'[1,3,5]\', 0) from stb1 union all select histogram(col_int, \'user_input\', \'[1,3,5]\', 0) from stb2;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + + tdSql.query('select histogram(col_int, \'linear_bin\', \'{"start":1, "width":2, "count":2, "infinity":false}\', 0) from tb1 union all select histogram(col_int, \'linear_bin\', \'{"start":1, "width":2, "count":2, "infinity":false}\', 0) from tb2;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_int, \'linear_bin\', \'{"start":1, "width":2, "count":2, "infinity":false}\', 0) from ctb1 union all select histogram(col_int, \'linear_bin\', \'{"start":1, "width":2, "count":2, "infinity":false}\', 0) from ctb2;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_int, \'linear_bin\', \'{"start":1, "width":2, "count":2, "infinity":false}\', 0) from stb1 union all select histogram(col_int, \'linear_bin\', \'{"start":1, "width":2, "count":2, "infinity":false}\', 0) from stb2;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + + tdSql.query('select histogram(col_int, \'log_bin\', \'{"start":1, "factor":2, "count":2, "infinity":false}\', 0) from tb1 union all select histogram(col_int, \'log_bin\', \'{"start":1, "factor":2, "count":2, "infinity":false}\', 0) from tb2;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.query('select histogram(col_int, \'log_bin\', \'{"start":1, "factor":2, "count":2, "infinity":false}\', 0) from ctb1 union all select histogram(col_int, \'log_bin\', \'{"start":1, "factor":2, "count":2, "infinity":false}\', 0) from ctb2;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + 
tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.query('select histogram(col_int, \'log_bin\', \'{"start":1, "factor":2, "count":2, "infinity":false}\', 0) from stb1 union all select histogram(col_int, \'log_bin\', \'{"start":1, "factor":2, "count":2, "infinity":false}\', 0) from stb2;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + + + tdSql.execute('drop database db') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/hyperloglog.py b/tests/system-test/2-query/hyperloglog.py new file mode 100644 index 0000000000000000000000000000000000000000..35703e441dd3465054d9b7b451c651f906da7e45 --- /dev/null +++ b/tests/system-test/2-query/hyperloglog.py @@ -0,0 +1,361 @@ +import datetime + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +PRIMARY_COL = "ts" + +INT_COL = "c1" +BINT_COL = "c2" +SINT_COL = "c3" +TINT_COL = "c4" +FLOAT_COL = "c5" +DOUBLE_COL = "c6" +BOOL_COL = "c7" + +BINARY_COL = "c8" +NCHAR_COL = "c9" +TS_COL = "c10" + +NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [ BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [ BOOL_COL, ] +TS_TYPE_COL = [ TS_COL, ] + +ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL ] + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor()) + + def __query_condition(self,tbname): + query_condition = [f"cast({col} as bigint)" for col in ALL_COL] + for num_col in NUM_COL: + query_condition.extend( + ( + f"{tbname}.{num_col}", + f"abs( {tbname}.{num_col} )", + f"acos( {tbname}.{num_col} )", + f"asin( {tbname}.{num_col} )", + f"atan( {tbname}.{num_col} )", + f"avg( {tbname}.{num_col} )", + f"ceil( {tbname}.{num_col} )", + f"cos( {tbname}.{num_col} )", + f"count( {tbname}.{num_col} )", + f"floor( {tbname}.{num_col} )", + f"log( {tbname}.{num_col}, {tbname}.{num_col})", + f"max( {tbname}.{num_col} )", + f"min( {tbname}.{num_col} )", + f"pow( {tbname}.{num_col}, 2)", + f"round( {tbname}.{num_col} )", + f"sum( {tbname}.{num_col} )", + f"sin( {tbname}.{num_col} )", + f"sqrt( {tbname}.{num_col} )", + f"tan( {tbname}.{num_col} )", + f"cast( {tbname}.{num_col} as timestamp)", + ) + ) + query_condition.extend((f"{num_col} + {any_col}" for any_col in ALL_COL)) + for char_col in CHAR_COL: + query_condition.extend( + ( + f"count({tbname}.{char_col})", + f"sum(cast({tbname}.{char_col} as bigint))", + f"max(cast({tbname}.{char_col} as bigint))", + f"min(cast({tbname}.{char_col} as bigint))", + f"avg(cast({tbname}.{char_col} as bigint))", + ) + ) + # query_condition.extend( + # ( + # 1010, + # ) + # ) + + return query_condition + + def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False): + table_reference = tb_list[0] + join_condition = table_reference + join = "inner join" if INNER else "join" + for i in range(len(tb_list[1:])): + join_condition += f" {join} 
{tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}" + + return join_condition + + def __where_condition(self, col=None, tbname=None, query_conditon=None): + if query_conditon and isinstance(query_conditon, str): + if query_conditon.startswith("count"): + query_conditon = query_conditon[6:-1] + elif query_conditon.startswith("max"): + query_conditon = query_conditon[4:-1] + elif query_conditon.startswith("sum"): + query_conditon = query_conditon[4:-1] + elif query_conditon.startswith("min"): + query_conditon = query_conditon[4:-1] + + if query_conditon: + return f" where {query_conditon} is not null" + if col in NUM_COL: + return f" where abs( {tbname}.{col} ) >= 0" + if col in CHAR_COL: + return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' " + if col in BOOLEAN_COL: + return f" where {tbname}.{col} in (false, true) " + if col in TS_TYPE_COL or col in PRIMARY_COL: + return f" where cast( {tbname}.{col} as binary(16) ) is not null " + + return "" + + def __group_condition(self, col, having = None): + if isinstance(col, str): + if col.startswith("count"): + col = col[6:-1] + elif col.startswith("max"): + col = col[4:-1] + elif col.startswith("sum"): + col = col[4:-1] + elif col.startswith("min"): + col = col[4:-1] + return f" group by {col} having {having}" if having else f" group by {col} " + + def __single_sql(self, select_clause, from_clause, where_condition="", group_condition=""): + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]: + return + return f"select hyperloglog({select_clause}) from {from_clause} {where_condition} {group_condition}" + + @property + def __tb_list(self): + return [ + "ct1", + "ct4", + "t1", + "ct2", + "stb1", + ] + + def sql_list(self): + sqls = [] + __no_join_tblist = self.__tb_list + for tb in __no_join_tblist: + select_claus_list = self.__query_condition(tb) + for select_claus in select_claus_list: + group_claus = self.__group_condition(col=select_claus) + where_claus = self.__where_condition(query_conditon=select_claus) + having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null") + sqls.extend( + ( + self.__single_sql(select_claus, tb, where_claus, having_claus), + self.__single_sql(select_claus, tb,), + self.__single_sql(select_claus, tb, where_condition=where_claus), + self.__single_sql(select_claus, tb, group_condition=group_claus), + ) + ) + + # return filter(None, sqls) + return list(filter(None, sqls)) + + def __get_type(self, col): + if tdSql.cursor.istype(col, "BOOL"): + return "BOOL" + if tdSql.cursor.istype(col, "INT"): + return "INT" + if tdSql.cursor.istype(col, "BIGINT"): + return "BIGINT" + if tdSql.cursor.istype(col, "TINYINT"): + return "TINYINT" + if tdSql.cursor.istype(col, "SMALLINT"): + return "SMALLINT" + if tdSql.cursor.istype(col, "FLOAT"): + return "FLOAT" + if tdSql.cursor.istype(col, "DOUBLE"): + return "DOUBLE" + if tdSql.cursor.istype(col, "BINARY"): + return "BINARY" + if tdSql.cursor.istype(col, "NCHAR"): + return "NCHAR" + if tdSql.cursor.istype(col, "TIMESTAMP"): + return "TIMESTAMP" + if tdSql.cursor.istype(col, "JSON"): + return "JSON" + if tdSql.cursor.istype(col, "TINYINT UNSIGNED"): + return "TINYINT UNSIGNED" + if tdSql.cursor.istype(col, "SMALLINT UNSIGNED"): + return "SMALLINT UNSIGNED" + if tdSql.cursor.istype(col, "INT UNSIGNED"): + return "INT UNSIGNED" + if tdSql.cursor.istype(col, "BIGINT UNSIGNED"): + return "BIGINT UNSIGNED" + + def 
spread_check(self): + sqls = self.sql_list() + tdLog.printNoPrefix("===step 1: current case, must return query OK") + for i in range(len(sqls)): + tdLog.info(f"sql: {sqls[i]}") + tdSql.query(sqls[i]) + + def __test_current(self): + tdSql.query("select hyperloglog(ts) from ct1") + tdSql.checkRows(1) + tdSql.query("select hyperloglog(c1) from ct2") + tdSql.checkRows(1) + tdSql.query("select hyperloglog(c1) from ct4 group by c1") + tdSql.checkRows(self.rows + 3) + tdSql.query("select hyperloglog(c1) from ct4 group by c7") + tdSql.checkRows(3) + tdSql.query("select hyperloglog(ct2.c1) from ct4 join ct2 on ct4.ts=ct2.ts") + tdSql.checkRows(1) + tdSql.checkData(0, 0, self.rows + 2) + tdSql.query("select hyperloglog(c1), c1 from stb1 group by c1") + for i in range(tdSql.queryRows): + tdSql.checkData(i, 0, 1) if tdSql.queryResult[i][1] is not None else tdSql.checkData(i, 0, 0) + + + + self.spread_check() + + def __test_error(self): + + tdLog.printNoPrefix("===step 0: err case, must return err") + tdSql.error( "select hyperloglog() from ct1" ) + tdSql.error( "select hyperloglog(c1, c2) from ct2" ) + tdSql.error( "select hyperloglog(1) from ct2" ) + tdSql.error( f"select hyperloglog({NUM_COL[0]}, {NUM_COL[1]}) from ct4" ) + tdSql.error( ''' select hyperloglog(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10']) + from ct1 + where ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null + group by ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] + having ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null ''' ) + + def all_test(self): + self.__test_error() + self.__test_current() + + def __create_tb(self): + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into 
ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) 
+tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/join.py b/tests/system-test/2-query/join.py index 8fc131e58173faf31fcc4ffbc8fab08f6e937aea..140808d3874915b56cc3a0ee559e352a1a0589ae 100644 --- a/tests/system-test/2-query/join.py +++ b/tests/system-test/2-query/join.py @@ -36,17 +36,14 @@ class TDTestCase: query_condition.extend( ( f"{tbname}.{char_col}", - f"upper( {tbname}.{char_col} )", + # f"upper( {tbname}.{char_col} )", ) ) query_condition.extend( f"cast( {tbname}.{un_char_col} as binary(16) ) " for un_char_col in NUM_COL) - query_condition.extend( f"cast( {tbname}.{char_col} + {tbname}.{char_col_2} as binary(32) ) " for char_col_2 in CHAR_COL ) - query_condition.extend( f"cast( {tbname}.{char_col} + {tbname}.{un_char_col} as binary(32) ) " for un_char_col in NUM_COL ) for num_col in NUM_COL: query_condition.extend( ( - f"{tbname}.{num_col}", - f"sin( {tbname}.{num_col} )" + f"sin( {tbname}.{num_col} )", ) ) query_condition.extend( f"{tbname}.{num_col} + {tbname}.{num_col_1} " for num_col_1 in NUM_COL ) @@ -55,41 +52,115 @@ class TDTestCase: return query_condition - def __join_condition(self, tb_list, filter=PRIMARY_COL): - # sourcery skip: flip-comparison - if 1 == len(tb_list): - join_filter = f"{tb_list[0]}.{filter} = {tb_list[0]}.{filter} " - elif 2 == len(tb_list): - join_filter = f"{tb_list[0]}.{filter} = {tb_list[1]}.{filter} " - else: - join_filter = f"{tb_list[0]}.{filter} = {tb_list[1]}.{filter} " - for i in range(1, len(tb_list)-1 ): - join_filter += f"and {tb_list[i]}.{filter} = {tb_list[i+1]}.{filter}" - - return join_filter - - def __where_condition(self, col, tbname): + def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False): + table_reference = tb_list[0] + join_condition = table_reference + join = "inner join" if INNER else "join" + for i in range(len(tb_list[1:])): + join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}" + + return join_condition + + def __where_condition(self, col=None, tbname=None, query_conditon=None): + if query_conditon and isinstance(query_conditon, str): + if query_conditon.startswith("count"): + query_conditon = query_conditon[6:-1] + elif query_conditon.startswith("max"): + query_conditon = query_conditon[4:-1] + elif query_conditon.startswith("sum"): + query_conditon = query_conditon[4:-1] + elif query_conditon.startswith("min"): + query_conditon = query_conditon[4:-1] + + if query_conditon: + return f" where {query_conditon} is not null" if col in NUM_COL: - return f" abs( {tbname}.{col} ) >= 0" - elif col in CHAR_COL: - return f" lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' " - elif col in BOOLEAN_COL: - return f" {tbname}.{col} in (false, true) " - elif col in TS_TYPE_COL or col in PRIMARY_COL: - return f" cast( {tbname}.{col} as binary(16) ) is not null " - else: - return "" - - def __group_condition(self, tbname, col, having = ""): + return f" where abs( {tbname}.{col} ) >= 0" + if col in CHAR_COL: + return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' " + if col in BOOLEAN_COL: + return f" where {tbname}.{col} in (false, true) " + if col in TS_TYPE_COL or col in PRIMARY_COL: + return f" where cast( {tbname}.{col} as binary(16) ) is not null " + + return "" + + def __group_condition(self, col, having = None): + if isinstance(col, str): + if col.startswith("count"): + col = col[6:-1] + elif col.startswith("max"): + col = col[4:-1] + elif col.startswith("sum"): + col = col[4:-1] 
+ elif col.startswith("min"): + col = col[4:-1] return f" group by {col} having {having}" if having else f" group by {col} " - def __join_check(self, tblist, checkrows, join_flag=True): + def __gen_sql(self, select_clause, from_clause, where_condition="", group_condition=""): + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]: + return + return f"select {select_clause} from {from_clause} {where_condition} {group_condition}" + + @property + def __join_tblist(self): + return [ + # ["ct1", "ct2"], + ["ct1", "ct4"], + ["ct1", "t1"], + # ["ct2", "ct4"], + # ["ct2", "t1"], + # ["ct4", "t1"], + # ["ct1", "ct2", "ct4"], + # ["ct1", "ct2", "t1"], + # ["ct1", "ct4", "t1"], + # ["ct2", "ct4", "t1"], + # ["ct1", "ct2", "ct4", "t1"], + ] + + @property + def __sqls_list(self): + sqls = [] + __join_tblist = self.__join_tblist + for join_tblist in __join_tblist: + for join_tb in join_tblist: + select_claus_list = self.__query_condition(join_tb) + for select_claus in select_claus_list: + group_claus = self.__group_condition( col=select_claus) + where_claus = self.__where_condition( query_conditon=select_claus ) + having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null" ) + sqls.extend( + ( + # self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, group_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, having_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus), + # self.__gen_sql(select_claus, self.__join_condition(join_tblist), group_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist), having_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist)), + # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, group_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, ), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), having_claus ), + # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), group_claus ), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True) ), + ) + ) + return list(filter(None, sqls)) + + def __join_check(self,): + tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") + for i in range(len(self.__sqls_list)): + tdSql.query(self.__sqls_list[i]) + # if i % 10 == 0 : + # tdLog.success(f"{i} sql is already executed success !") + + def __join_check_old(self, tblist, checkrows, join_flag=True): query_conditions = self.__query_condition(tblist[0]) join_condition = self.__join_condition(tb_list=tblist) if join_flag else " " for condition in query_conditions: where_condition = self.__where_condition(col=condition, tbname=tblist[0]) - group_having = self.__group_condition(tbname=tblist[0], col=condition, having=f"{condition} is not null " ) - group_no_having= self.__group_condition(tbname=tblist[0], col=condition ) + group_having = self.__group_condition(col=condition, having=f"{condition} is not null " ) + group_no_having= self.__group_condition(col=condition ) groups = ["", group_having, group_no_having] for group_condition in groups: if where_condition: @@ -116,23 +187,6 @@ class TDTestCase: tdSql.query(sql=sql) # tdSql.checkRows(checkrows) - - def 
__test_current(self): - # sourcery skip: extract-duplicate-method, inline-immediately-returned-variable - tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") - tblist_1 = ["ct1", "ct2"] - self.__join_check(tblist_1, 1) - tdLog.printNoPrefix(f"==========current sql condition check in {tblist_1} over==========") - tblist_2 = ["ct2", "ct4"] - self.__join_check(tblist_2, self.rows) - tdLog.printNoPrefix(f"==========current sql condition check in {tblist_2} over==========") - tblist_3 = ["t1", "ct4"] - self.__join_check(tblist_3, 1) - tdLog.printNoPrefix(f"==========current sql condition check in {tblist_3} over==========") - tblist_4 = ["t1", "ct1"] - self.__join_check(tblist_4, 1) - tdLog.printNoPrefix(f"==========current sql condition check in {tblist_4} over==========") - def __test_error(self): # sourcery skip: extract-duplicate-method, move-assign-in-block tdLog.printNoPrefix("==========err sql condition check , must return error==========") err_list_1 = ["ct1","ct2", "ct4"] err_list_2 = ["ct1","ct2", "t1"] err_list_3 = ["ct1","ct4", "t1"] err_list_4 = ["ct2","ct4", "t1"] err_list_5 = ["ct1", "ct2","ct4", "t1"] - self.__join_check(err_list_1, -1) + self.__join_check_old(err_list_1, -1) tdLog.printNoPrefix(f"==========err sql condition check in {err_list_1} over==========") - self.__join_check(err_list_2, -1) + self.__join_check_old(err_list_2, -1) tdLog.printNoPrefix(f"==========err sql condition check in {err_list_2} over==========") - self.__join_check(err_list_3, -1) + self.__join_check_old(err_list_3, -1) tdLog.printNoPrefix(f"==========err sql condition check in {err_list_3} over==========") - self.__join_check(err_list_4, -1) + self.__join_check_old(err_list_4, -1) tdLog.printNoPrefix(f"==========err sql condition check in {err_list_4} over==========") - self.__join_check(err_list_5, -1) + self.__join_check_old(err_list_5, -1) tdLog.printNoPrefix(f"==========err sql condition check in {err_list_5} over==========") - self.__join_check(["ct2", "ct4"], -1, join_flag=False) + self.__join_check_old(["ct2", "ct4"], -1, join_flag=False) tdLog.printNoPrefix("==========err sql condition check in has no join condition over==========") tdSql.error( f"select c1, c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL}" ) @@ -172,7 +226,7 @@ def all_test(self): - self.__test_current() + self.__join_check() self.__test_error() diff --git a/tests/system-test/2-query/join2.py b/tests/system-test/2-query/join2.py new file mode 100644 index 0000000000000000000000000000000000000000..40da41eee76f7fe4be70a8217c06ac0f94fd8981 --- /dev/null +++ b/tests/system-test/2-query/join2.py @@ -0,0 +1,357 @@ +import datetime + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +PRIMARY_COL = "ts" + +INT_COL = "c1" +BINT_COL = "c2" +SINT_COL = "c3" +TINT_COL = "c4" +FLOAT_COL = "c5" +DOUBLE_COL = "c6" +BOOL_COL = "c7" + +BINARY_COL = "c8" +NCHAR_COL = "c9" +TS_COL = "c10" + +NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [ BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [ BOOL_COL, ] +TS_TYPE_COL = [ TS_COL, ] + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), True) + + def __query_condition(self,tbname): + query_condition = [] + for char_col in CHAR_COL: + query_condition.extend( + ( + f"{tbname}.{char_col}", + # f"upper( {tbname}.{char_col} )", + ) + ) + query_condition.extend( f"cast( {tbname}.{un_char_col} as 
binary(16) ) " for un_char_col in NUM_COL) + for num_col in NUM_COL: + query_condition.extend( + ( + f"sin( {tbname}.{num_col} )", + ) + ) + query_condition.extend( f"{tbname}.{num_col} + {tbname}.{num_col_1} " for num_col_1 in NUM_COL ) + + # the tail of the literal below and the span down through __where_condition were garbled when this patch was captured; the literal is closed out minimally and the two methods are restored from the identical copies in join.py + query_condition.append(''' "test1234!@#$%^&*():'><" ''') + + return query_condition + + def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False): + table_reference = tb_list[0] + join_condition = table_reference + join = "inner join" if INNER else "join" + for i in range(len(tb_list[1:])): + join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}" + + return join_condition + + def __where_condition(self, col=None, tbname=None, query_conditon=None): + if query_conditon and isinstance(query_conditon, str): + if query_conditon.startswith("count"): + query_conditon = query_conditon[6:-1] + elif query_conditon.startswith("max"): + query_conditon = query_conditon[4:-1] + elif query_conditon.startswith("sum"): + query_conditon = query_conditon[4:-1] + elif query_conditon.startswith("min"): + query_conditon = query_conditon[4:-1] + + if query_conditon: + return f" where {query_conditon} is not null" + if col in NUM_COL: + return f" where abs( {tbname}.{col} ) >= 0" + if col in CHAR_COL: + return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' " + if col in BOOLEAN_COL: + return f" where {tbname}.{col} in (false, true) " + if col in TS_TYPE_COL or col in PRIMARY_COL: + return f" where cast( {tbname}.{col} as binary(16) ) is not null " + + return "" + + def __group_condition(self, col, having = None): + if isinstance(col, str): + if col.startswith("count"): + col = col[6:-1] + elif col.startswith("max"): + col = col[4:-1] + elif col.startswith("sum"): + col = col[4:-1] + elif col.startswith("min"): + col = col[4:-1] + return f" group by {col} having {having}" if having else f" group by {col} " + + def __gen_sql(self, select_clause, from_clause, where_condition="", group_condition=""): + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]: + return + return f"select {select_clause} from {from_clause} {where_condition} {group_condition}" + + @property + def __join_tblist(self): + return [ + # ["ct1", "ct2"], + # ["ct1", "ct4"], + # ["ct1", "t1"], + ["ct2", "ct4"], + # ["ct2", "t1"], + ["ct4", "t1"], + # ["ct1", "ct2", "ct4"], + # ["ct1", "ct2", "t1"], + # ["ct1", "ct4", "t1"], + # ["ct2", "ct4", "t1"], + # ["ct1", "ct2", "ct4", "t1"], + ] + + @property + def __sqls_list(self): + sqls = [] + __join_tblist = self.__join_tblist + for join_tblist in __join_tblist: + for join_tb in join_tblist: + select_claus_list = self.__query_condition(join_tb) + for select_claus in select_claus_list: + group_claus = self.__group_condition( col=select_claus) + where_claus = self.__where_condition( query_conditon=select_claus ) + having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null" ) + sqls.extend( + ( + # self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, group_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, having_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus), + # self.__gen_sql(select_claus, self.__join_condition(join_tblist), group_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist), having_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist)), + # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, group_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, ), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), having_claus ), + # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), group_claus ), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True) ), + ) + ) + return list(filter(None, sqls)) + + def __join_check(self,): + tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") + for i in range(len(self.__sqls_list)): + tdSql.query(self.__sqls_list[i]) + # if i % 10 == 0 : + # tdLog.success(f"{i} sql is already executed success !") + + def __join_check_old(self, tblist, checkrows, 
join_flag=True): + query_conditions = self.__query_condition(tblist[0]) + join_condition = self.__join_condition(tb_list=tblist) if join_flag else " " + for condition in query_conditions: + where_condition = self.__where_condition(col=condition, tbname=tblist[0]) + group_having = self.__group_condition(col=condition, having=f"{condition} is not null " ) + group_no_having= self.__group_condition(col=condition ) + groups = ["", group_having, group_no_having] + for group_condition in groups: + if where_condition: + sql = f" select {condition} from {tblist[0]},{tblist[1]} where {join_condition} and {where_condition} {group_condition} " + else: + sql = f" select {condition} from {tblist[0]},{tblist[1]} where {join_condition} {group_condition} " + + if not join_flag : + tdSql.error(sql=sql) + break + if len(tblist) == 2: + if "ct1" in tblist or "t1" in tblist: + self.__join_current(sql, checkrows) + elif where_condition or "not null" in group_condition: + self.__join_current(sql, checkrows + 2 ) + elif group_condition: + self.__join_current(sql, checkrows + 3 ) + else: + self.__join_current(sql, checkrows + 5 ) + if len(tblist) > 2 or len(tblist) < 1: + tdSql.error(sql=sql) + + def __join_current(self, sql, checkrows): + tdSql.query(sql=sql) + # tdSql.checkRows(checkrows) + + def __test_error(self): + # sourcery skip: extract-duplicate-method, move-assign-in-block + tdLog.printNoPrefix("==========err sql condition check , must return error==========") + err_list_1 = ["ct1","ct2", "ct4"] + err_list_2 = ["ct1","ct2", "t1"] + err_list_3 = ["ct1","ct4", "t1"] + err_list_4 = ["ct2","ct4", "t1"] + err_list_5 = ["ct1", "ct2","ct4", "t1"] + self.__join_check_old(err_list_1, -1) + tdLog.printNoPrefix(f"==========err sql condition check in {err_list_1} over==========") + self.__join_check_old(err_list_2, -1) + tdLog.printNoPrefix(f"==========err sql condition check in {err_list_2} over==========") + self.__join_check_old(err_list_3, -1) + tdLog.printNoPrefix(f"==========err sql condition check in {err_list_3} over==========") + self.__join_check_old(err_list_4, -1) + tdLog.printNoPrefix(f"==========err sql condition check in {err_list_4} over==========") + self.__join_check_old(err_list_5, -1) + tdLog.printNoPrefix(f"==========err sql condition check in {err_list_5} over==========") + self.__join_check_old(["ct2", "ct4"], -1, join_flag=False) + tdLog.printNoPrefix("==========err sql condition check in has no join condition over==========") + + tdSql.error( f"select c1, c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL}" ) + tdSql.error( f"select ct2.c1, ct2.c2 from ct2, ct4 where ct2.{INT_COL}=ct4.{INT_COL}" ) + tdSql.error( f"select ct2.c1, ct2.c2 from ct2, ct4 where ct2.{TS_COL}=ct4.{TS_COL}" ) + tdSql.error( f"select ct2.c1, ct2.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{TS_COL}" ) + tdSql.error( f"select ct2.c1, ct1.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL}" ) + tdSql.error( f"select ct2.c1, ct4.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL} and c1 is not null " ) + tdSql.error( f"select ct2.c1, ct4.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL} and ct1.c1 is not null " ) + + + tbname = ["ct1", "ct2", "ct4", "t1"] + + # for tb in tbname: + # for errsql in self.__join_err_check(tb): + # tdSql.error(sql=errsql) + # tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") + + + def all_test(self): + self.__join_check() + self.__test_error() + + + def __create_tb(self): + tdSql.prepare() + + 
tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2} + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i 
+ for i in range(rows):
+ insert_data = f'''insert into t1 values
+ ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
+ "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
+ '''
+ tdSql.execute(insert_data)
+ tdSql.execute(
+ f'''insert into t1 values
+ ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
+ { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
+ "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
+ )
+ (
+ { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
+ { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
+ "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
+ )
+ '''
+ )
+
+
+ def run(self):
+ tdSql.prepare()
+
+ tdLog.printNoPrefix("==========step1:create table")
+ self.__create_tb()
+
+ tdLog.printNoPrefix("==========step2:insert data")
+ self.rows = 10
+ self.__insert_data(self.rows)
+
+ tdLog.printNoPrefix("==========step3:all check")
+ self.all_test()
+
+ tdDnodes.stop(1)
+ tdDnodes.start(1)
+
+ tdSql.execute("use db")
+
+ tdLog.printNoPrefix("==========step4:after wal, all check again")
+ self.all_test()
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/json_tag.py b/tests/system-test/2-query/json_tag.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8ef8fa363fe1f2911f4ea3fde9d43a82f7a8a88
--- /dev/null
+++ b/tests/system-test/2-query/json_tag.py
@@ -0,0 +1,565 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+import json
+
+
+class TDTestCase:
+ def caseDescription(self):
+ '''
+ JSON tag test case: covers creating tables with a JSON tag, selecting JSON tags, querying on JSON tags in where conditions, and using JSON tags in group by/order by/join/subquery.
+ case1: [TD-12452] fix error if json tag is NULL + case2: [TD-12389] describe child table, tag length error if the tag is json tag + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + print("============== STEP 1 ===== prepare data & validate json string") + tdSql.error("create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json, tagint int)") + tdSql.error("create table if not exists jsons1(ts timestamp, data json) tags(tagint int)") + tdSql.execute("create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)") + tdSql.execute("insert into jsons1_1 using jsons1 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 1, false, 'json1', '你是') (1591060608000, 23, true, '等等', 'json')") + tdSql.execute("insert into jsons1_2 using jsons1 tags('{\"tag1\":5,\"tag2\":\"beijing\"}') values (1591060628000, 2, true, 'json2', 'sss')") + tdSql.execute("insert into jsons1_3 using jsons1 tags('{\"tag1\":false,\"tag2\":\"beijing\"}') values (1591060668000, 3, false, 'json3', 'efwe')") + tdSql.execute("insert into jsons1_4 using jsons1 tags('{\"tag1\":null,\"tag2\":\"shanghai\",\"tag3\":\"hello\"}') values (1591060728000, 4, true, 'json4', '323sd')") + tdSql.execute("insert into jsons1_5 using jsons1 tags('{\"tag1\":1.232, \"tag2\":null}') values(1591060928000, 1, false, '你就会', 'ewe')") + tdSql.execute("insert into jsons1_6 using jsons1 tags('{\"tag1\":11,\"tag2\":\"\",\"tag2\":null}') values(1591061628000, 11, false, '你就会','')") + tdSql.execute("insert into jsons1_7 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '你就会', 'dws')") + + # test duplicate key using the first one. 
eliminate empty keys
+ tdSql.execute("CREATE TABLE if not exists jsons1_8 using jsons1 tags('{\"tag1\":null, \"tag1\":true, \"tag1\":45, \"1tag$\":2, \" \":90, \"\":32}')")
+ #tdSql.query("select jtag from jsons1_8")
+ #tdSql.checkData(0, 0, '{"tag1":null,"1tag$":2," ":90}')
+
+ # test empty json string: jtag is saved as NULL
+ tdSql.execute("insert into jsons1_9 using jsons1 tags('\t') values (1591062328000, 24, NULL, '你就会', '2sdw')")
+ tdSql.execute("CREATE TABLE if not exists jsons1_10 using jsons1 tags('')")
+ tdSql.execute("CREATE TABLE if not exists jsons1_11 using jsons1 tags(' ')")
+ tdSql.execute("CREATE TABLE if not exists jsons1_12 using jsons1 tags('{}')")
+ tdSql.execute("CREATE TABLE if not exists jsons1_13 using jsons1 tags('null')")
+
+ # test invalid json
+ tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('\"efwewf\"')")
+ tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('3333')")
+ tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('33.33')")
+ tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('false')")
+ tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('[1,true]')")
+ tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{222}')")
+ tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"fe\"}')")
+ #
+ # test invalid json keys: a key must consist of printable ASCII characters
+ tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"tag1\":[1,true]}')")
+ tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"tag1\":{}}')")
+ tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"。loc\":\"fff\"}')")
+ tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"\t\":\"fff\"}')")
+ tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"试试\":\"fff\"}')")
+
+ # test invalid json values: a number cannot be inf or nan (TD-12166)
+ tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"k\":1.8e308}')")
+ tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"k\":-1.8e308}')")
+ #
+ # test length limit
+ char1= ''.join(['abcd']*64)
+ char3= ''.join(['abcd']*1021)
+ print(len(char3)) # 4084
+ tdSql.error("CREATE TABLE if not exists jsons1_15 using jsons1 tags('{\"%s1\":5}')" % char1) # len(key)=257
+ tdSql.execute("CREATE TABLE if not exists jsons1_15 using jsons1 tags('{\"%s\":5}')" % char1) # len(key)=256
+ tdSql.error("CREATE TABLE if not exists jsons1_16 using jsons1 tags('{\"TSSSS\":\"%s\"}')" % char3) # len(object)=4096
+ #tdSql.execute("CREATE TABLE if not exists jsons1_16 using jsons1 tags('{\"TSSS\":\"%s\"}')" % char3) # len(object)=4095
+ tdSql.execute("drop table if exists jsons1_15")
+ tdSql.execute("drop table if exists jsons1_16")
+ #
+ print("============== STEP 2 ===== alter table json tag")
+ tdSql.error("ALTER STABLE jsons1 add tag tag2 nchar(20)")
+ tdSql.error("ALTER STABLE jsons1 drop tag jtag")
+ tdSql.error("ALTER TABLE jsons1 MODIFY TAG jtag nchar(128)")
+ #
+ tdSql.execute("ALTER TABLE jsons1_1 SET TAG jtag='{\"tag1\":\"femail\",\"tag2\":35,\"tag3\":true}'")
+ # tdSql.query("select jtag from jsons1_1")
+ # tdSql.checkData(0, 0, '{"tag1":"femail","tag2":35,"tag3":true}')
+ tdSql.execute("ALTER TABLE jsons1 rename TAG jtag jtag_new")
+ tdSql.execute("ALTER TABLE jsons1 rename TAG jtag_new jtag")
+
+ tdSql.execute("create table st(ts timestamp, i int) tags(t int)")
+ tdSql.error("ALTER STABLE st add tag jtag json")
+ tdSql.error("ALTER 
STABLE st add column jtag json") + # + # print("============== STEP 3 ===== query table") + # # test error syntax + # tdSql.error("select * from jsons1 where jtag->tag1='beijing'") + # tdSql.error("select * from jsons1 where jtag->'location'") + # tdSql.error("select * from jsons1 where jtag->''") + # tdSql.error("select * from jsons1 where jtag->''=9") + # tdSql.error("select -> from jsons1") + # tdSql.error("select * from jsons1 where contains") + # tdSql.error("select * from jsons1 where jtag->") + # tdSql.error("select jtag->location from jsons1") + # tdSql.error("select jtag contains location from jsons1") + # tdSql.error("select * from jsons1 where jtag contains location") + # tdSql.error("select * from jsons1 where jtag contains''") + # tdSql.error("select * from jsons1 where jtag contains 'location'='beijing'") + # + # # test function error + # tdSql.error("select avg(jtag->'tag1') from jsons1") + # tdSql.error("select avg(jtag) from jsons1") + # tdSql.error("select min(jtag->'tag1') from jsons1") + # tdSql.error("select min(jtag) from jsons1") + # tdSql.error("select ceil(jtag->'tag1') from jsons1") + # tdSql.error("select ceil(jtag) from jsons1") + # + # # test select normal column + # tdSql.query("select dataint from jsons1") + # tdSql.checkRows(9) + # tdSql.checkData(1, 0, 1) + + # test select json tag + # tdSql.query("select * from jsons1") + # tdSql.checkRows(8) + # tdSql.query("select jtag from jsons1") + # tdSql.checkRows(7) + # tdSql.query("select jtag from jsons1 where jtag is null") + # tdSql.checkRows(5) + # tdSql.query("select jtag from jsons1 where jtag is not null") + # tdSql.checkRows(8) + + # test jtag is NULL + #tdSql.query("select jtag from jsons1_9") + #tdSql.checkData(0, 0, None) + + # # test select json tag->'key', value is string + # tdSql.query("select jtag->'tag1' from jsons1_1") + # tdSql.checkData(0, 0, '"femail"') + # tdSql.query("select jtag->'tag2' from jsons1_6") + # tdSql.checkData(0, 0, '""') + # # test select json tag->'key', value is int + # tdSql.query("select jtag->'tag2' from jsons1_1") + # tdSql.checkData(0, 0, 35) + # # test select json tag->'key', value is bool + # tdSql.query("select jtag->'tag3' from jsons1_1") + # tdSql.checkData(0, 0, "true") + # # test select json tag->'key', value is null + # tdSql.query("select jtag->'tag1' from jsons1_4") + # tdSql.checkData(0, 0, "null") + # # test select json tag->'key', value is double + # tdSql.query("select jtag->'tag1' from jsons1_5") + # tdSql.checkData(0, 0, "1.232000000") + # # test select json tag->'key', key is not exist + # tdSql.query("select jtag->'tag10' from jsons1_4") + # tdSql.checkData(0, 0, None) + # + # tdSql.query("select jtag->'tag1' from jsons1") + # tdSql.checkRows(13) + # test header name + res = tdSql.getColNameList("select jtag->'tag1' from jsons1") + cname_list = [] + cname_list.append("jtag->'tag1'") + tdSql.checkColNameList(res, cname_list) + + + + # # test where with json tag + # tdSql.error("select * from jsons1_1 where jtag is not null") + # tdSql.error("select * from jsons1 where jtag='{\"tag1\":11,\"tag2\":\"\"}'") + # tdSql.error("select * from jsons1 where jtag->'tag1'={}") + # + # # where json value is string + # tdSql.query("select * from jsons1 where jtag->'tag2'='beijing'") + # tdSql.checkRows(2) + # tdSql.query("select dataint,tbname,jtag->'tag1',jtag from jsons1 where jtag->'tag2'='beijing'") + # tdSql.checkData(0, 0, 2) + # tdSql.checkData(0, 1, 'jsons1_2') + # tdSql.checkData(0, 2, 5) + # tdSql.checkData(0, 3, '{"tag1":5,"tag2":"beijing"}') + # 
tdSql.checkData(1, 0, 3) + # tdSql.checkData(1, 1, 'jsons1_3') + # tdSql.checkData(1, 2, 'false') + # tdSql.query("select * from jsons1 where jtag->'tag1'='beijing'") + # tdSql.checkRows(0) + # tdSql.query("select * from jsons1 where jtag->'tag1'='收到货'") + # tdSql.checkRows(1) + # tdSql.query("select * from jsons1 where jtag->'tag2'>'beijing'") + # tdSql.checkRows(1) + # tdSql.query("select * from jsons1 where jtag->'tag2'>='beijing'") + # tdSql.checkRows(3) + # tdSql.query("select * from jsons1 where jtag->'tag2'<'beijing'") + # tdSql.checkRows(2) + # tdSql.query("select * from jsons1 where jtag->'tag2'<='beijing'") + # tdSql.checkRows(4) + # tdSql.query("select * from jsons1 where jtag->'tag2'!='beijing'") + # tdSql.checkRows(3) + # tdSql.query("select * from jsons1 where jtag->'tag2'=''") + # tdSql.checkRows(2) + # + # # where json value is int + # tdSql.query("select * from jsons1 where jtag->'tag1'=5") + # tdSql.checkRows(1) + # tdSql.checkData(0, 1, 2) + # tdSql.query("select * from jsons1 where jtag->'tag1'=10") + # tdSql.checkRows(0) + # tdSql.query("select * from jsons1 where jtag->'tag1'<54") + # tdSql.checkRows(3) + # tdSql.query("select * from jsons1 where jtag->'tag1'<=11") + # tdSql.checkRows(3) + # tdSql.query("select * from jsons1 where jtag->'tag1'>4") + # tdSql.checkRows(2) + # tdSql.query("select * from jsons1 where jtag->'tag1'>=5") + # tdSql.checkRows(2) + # tdSql.query("select * from jsons1 where jtag->'tag1'!=5") + # tdSql.checkRows(2) + # tdSql.query("select * from jsons1 where jtag->'tag1'!=55") + # tdSql.checkRows(3) + # + # # where json value is double + # tdSql.query("select * from jsons1 where jtag->'tag1'=1.232") + # tdSql.checkRows(1) + # tdSql.query("select * from jsons1 where jtag->'tag1'<1.232") + # tdSql.checkRows(0) + # tdSql.query("select * from jsons1 where jtag->'tag1'<=1.232") + # tdSql.checkRows(1) + # tdSql.query("select * from jsons1 where jtag->'tag1'>1.23") + # tdSql.checkRows(3) + # tdSql.query("select * from jsons1 where jtag->'tag1'>=1.232") + # tdSql.checkRows(3) + # tdSql.query("select * from jsons1 where jtag->'tag1'!=1.232") + # tdSql.checkRows(2) + # tdSql.query("select * from jsons1 where jtag->'tag1'!=3.232") + # tdSql.checkRows(3) + # tdSql.error("select * from jsons1 where jtag->'tag1'/0=3") + # tdSql.error("select * from jsons1 where jtag->'tag1'/5=1") + # + # # where json value is bool + # tdSql.query("select * from jsons1 where jtag->'tag1'=true") + # tdSql.checkRows(0) + # tdSql.query("select * from jsons1 where jtag->'tag1'=false") + # tdSql.checkRows(1) + # tdSql.query("select * from jsons1 where jtag->'tag1'!=false") + # tdSql.checkRows(0) + # tdSql.error("select * from jsons1 where jtag->'tag1'>false") + # + # # where json value is null + # tdSql.query("select * from jsons1 where jtag->'tag1'=null") # only json suport =null. This synatx will change later. 
+ # tdSql.checkRows(1) + # + # # where json is null + # tdSql.query("select * from jsons1 where jtag is null") + # tdSql.checkRows(1) + # tdSql.query("select * from jsons1 where jtag is not null") + # tdSql.checkRows(8) + # + # # where json key is null + # tdSql.query("select * from jsons1 where jtag->'tag_no_exist'=3") + # tdSql.checkRows(0) + # + # # where json value is not exist + # tdSql.query("select * from jsons1 where jtag->'tag1' is null") + # tdSql.checkData(0, 0, 'jsons1_9') + # tdSql.checkRows(1) + # tdSql.query("select * from jsons1 where jtag->'tag4' is null") + # tdSql.checkRows(9) + # tdSql.query("select * from jsons1 where jtag->'tag3' is not null") + # tdSql.checkRows(4) + # + # # test contains + # tdSql.query("select * from jsons1 where jtag contains 'tag1'") + # tdSql.checkRows(8) + # tdSql.query("select * from jsons1 where jtag contains 'tag3'") + # tdSql.checkRows(4) + # tdSql.query("select * from jsons1 where jtag contains 'tag_no_exist'") + # tdSql.checkRows(0) + # + # # test json tag in where condition with and/or + # tdSql.query("select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='beijing'") + # tdSql.checkRows(1) + # tdSql.query("select * from jsons1 where jtag->'tag1'=false or jtag->'tag2'='beijing'") + # tdSql.checkRows(2) + # tdSql.query("select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai'") + # tdSql.checkRows(0) + # tdSql.query("select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai'") + # tdSql.checkRows(0) + # tdSql.query("select * from jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35") + # tdSql.checkRows(0) + # tdSql.query("select * from jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35") + # tdSql.checkRows(0) + # tdSql.query("select * from jsons1 where jtag->'tag1' is not null and jtag contains 'tag3'") + # tdSql.checkRows(4) + # tdSql.query("select * from jsons1 where jtag->'tag1'='femail' and jtag contains 'tag3'") + # tdSql.checkRows(2) + # + # + # # test with between and + # tdSql.query("select * from jsons1 where jtag->'tag1' between 1 and 30") + # tdSql.checkRows(3) + # tdSql.query("select * from jsons1 where jtag->'tag1' between 'femail' and 'beijing'") + # tdSql.checkRows(2) + # + # # test with tbname/normal column + # tdSql.query("select * from jsons1 where tbname = 'jsons1_1'") + # tdSql.checkRows(2) + # tdSql.query("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3'") + # tdSql.checkRows(2) + # tdSql.query("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=3") + # tdSql.checkRows(0) + # tdSql.query("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=23") + # tdSql.checkRows(1) + # + # + # # test where condition like + # tdSql.query("select *,tbname from jsons1 where jtag->'tag2' like 'bei%'") + # tdSql.checkRows(2) + # tdSql.query("select *,tbname from jsons1 where jtag->'tag1' like 'fe%' and jtag->'tag2' is not null") + # tdSql.checkRows(2) + # + # # test where condition in no support in + # tdSql.error("select * from jsons1 where jtag->'tag1' in ('beijing')") + # + # # test where condition match/nmath + # tdSql.query("select * from jsons1 where jtag->'tag1' match 'ma'") + # tdSql.checkRows(2) + # tdSql.query("select * from jsons1 where jtag->'tag1' match 'ma$'") + # tdSql.checkRows(0) + # tdSql.query("select * from jsons1 where jtag->'tag2' match 'jing$'") + # tdSql.checkRows(2) + # tdSql.query("select * from jsons1 where jtag->'tag1' match '收到'") + # tdSql.checkRows(1) + # tdSql.query("select * from 
jsons1 where jtag->'tag1' nmatch 'ma'") + # tdSql.checkRows(1) + # + # # test distinct + # tdSql.execute("insert into jsons1_14 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '你就会', 'dws')") + # tdSql.query("select distinct jtag->'tag1' from jsons1") + # tdSql.checkRows(8) + # tdSql.query("select distinct jtag from jsons1") + # tdSql.checkRows(9) + # + # #test dumplicate key with normal colomn + # tdSql.execute("INSERT INTO jsons1_15 using jsons1 tags('{\"tbname\":\"tt\",\"databool\":true,\"datastr\":\"是是是\"}') values(1591060828000, 4, false, 'jjsf', \"你就会\")") + # tdSql.query("select *,tbname,jtag from jsons1 where jtag->'datastr' match '是' and datastr match 'js'") + # tdSql.checkRows(1) + # tdSql.query("select tbname,jtag->'tbname' from jsons1 where jtag->'tbname'='tt' and tbname='jsons1_14'") + # tdSql.checkRows(0) + # + # # test join + # tdSql.execute("create table if not exists jsons2(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)") + # tdSql.execute("insert into jsons2_1 using jsons2 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 2, false, 'json2', '你是2')") + # tdSql.execute("insert into jsons2_2 using jsons2 tags('{\"tag1\":5,\"tag2\":null}') values (1591060628000, 2, true, 'json2', 'sss')") + # + # tdSql.execute("create table if not exists jsons3(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)") + # tdSql.execute("insert into jsons3_1 using jsons3 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 3, false, 'json3', '你是3')") + # tdSql.execute("insert into jsons3_2 using jsons3 tags('{\"tag1\":5,\"tag2\":\"beijing\"}') values (1591060638000, 2, true, 'json3', 'sss')") + # tdSql.query("select 'sss',33,a.jtag->'tag3' from jsons2 a,jsons3 b where a.ts=b.ts and a.jtag->'tag1'=b.jtag->'tag1'") + # tdSql.checkData(0, 0, "sss") + # tdSql.checkData(0, 2, "true") + # + # res = tdSql.getColNameList("select 'sss',33,a.jtag->'tag3' from jsons2 a,jsons3 b where a.ts=b.ts and a.jtag->'tag1'=b.jtag->'tag1'") + # cname_list = [] + # cname_list.append("sss") + # cname_list.append("33") + # cname_list.append("a.jtag->'tag3'") + # tdSql.checkColNameList(res, cname_list) + # + # # test group by & order by json tag + # tdSql.error("select count(*) from jsons1 group by jtag->'tag1' order by jtag->'tag2'") + # tdSql.error("select count(*) from jsons1 group by jtag->'tag1' order by jtag") + # tdSql.query("select count(*) from jsons1 group by jtag->'tag1' order by jtag->'tag1' desc") + # tdSql.checkRows(8) + # tdSql.checkData(1, 0, 2) + # tdSql.checkData(1, 1, '"femail"') + # tdSql.checkData(2, 0, 1) + # tdSql.checkData(2, 1, 11) + # tdSql.checkData(5, 0, 1) + # tdSql.checkData(5, 1, "false") + # tdSql.checkData(6, 0, 1) + # tdSql.checkData(6, 1, "null") + # tdSql.checkData(7, 0, 2) + # tdSql.checkData(7, 1, None) + # + # tdSql.query("select count(*) from jsons1 group by jtag->'tag1' order by jtag->'tag1' asc") + # tdSql.checkRows(8) + # tdSql.checkData(0, 0, 2) + # tdSql.checkData(0, 1, None) + # tdSql.checkData(2, 0, 1) + # tdSql.checkData(2, 1, "false") + # tdSql.checkData(5, 0, 1) + # tdSql.checkData(5, 1, 11) + # tdSql.checkData(6, 0, 2) + # tdSql.checkData(6, 1, '"femail"') + # + # # test stddev with group by json tag + # tdSql.query("select stddev(dataint) from jsons1 group by jtag->'tag1'") + # tdSql.checkData(0, 0, 10) + # tdSql.checkData(0, 1, None) + # tdSql.checkData(1, 0, 0) + # 
tdSql.checkData(1, 1, "null") + # tdSql.checkData(6, 0, 11) + # tdSql.checkData(6, 1, '"femail"') + # + # res = tdSql.getColNameList("select stddev(dataint) from jsons1 group by jsons1.jtag->'tag1'") + # cname_list = [] + # cname_list.append("stddev(dataint)") + # cname_list.append("jsons1.jtag->'tag1'") + # tdSql.checkColNameList(res, cname_list) + # + # # test top/bottom with group by json tag + # tdSql.query("select top(dataint,100) from jsons1 group by jtag->'tag1'") + # tdSql.checkRows(11) + # tdSql.checkData(0, 1, 4) + # tdSql.checkData(1, 1, 24) + # tdSql.checkData(1, 2, None) + # tdSql.checkData(8, 1, 1) + # tdSql.checkData(8, 2, '"femail"') + # + # # test having + # tdSql.query("select stddev(dataint) from jsons1 group by jtag->'tag1' having stddev(dataint) > 0") + # tdSql.checkRows(2) + # + # # subquery with json tag + # tdSql.query("select * from (select jtag, dataint from jsons1)") + # tdSql.checkRows(11) + # tdSql.checkData(1, 1, 1) + # tdSql.checkData(2, 0, '{"tag1":5,"tag2":"beijing"}') + # + # tdSql.query("select jtag->'tag1' from (select jtag->'tag1', dataint from jsons1)") + # tdSql.checkRows(11) + # tdSql.checkData(1, 0, '"femail"') + # tdSql.checkData(2, 0, 5) + # + # res = tdSql.getColNameList("select jtag->'tag1' from (select jtag->'tag1', dataint from jsons1)") + # cname_list = [] + # cname_list.append("jtag->'tag1'") + # tdSql.checkColNameList(res, cname_list) + # + # tdSql.query("select ts,tbname,jtag->'tag1' from (select jtag->'tag1',tbname,ts from jsons1 order by ts)") + # tdSql.checkRows(11) + # tdSql.checkData(1, 1, "jsons1_1") + # tdSql.checkData(1, 2, '"femail"') + # + # # union all + # tdSql.error("select jtag->'tag1' from jsons1 union all select jtag->'tag2' from jsons2") + # tdSql.error("select jtag->'tag1' from jsons1_1 union all select jtag->'tag2' from jsons2_1") + # + # tdSql.query("select jtag->'tag1' from jsons1_1 union all select jtag->'tag1' from jsons2_1") + # tdSql.checkRows(2) + # tdSql.query("select dataint,jtag->'tag1',tbname from jsons1 union all select dataint,jtag->'tag1',tbname from jsons2") + # tdSql.checkRows(13) + # tdSql.query("select dataint,jtag,tbname from jsons1 union all select dataint,jtag,tbname from jsons2") + # tdSql.checkRows(13) + # + # #show create table + # tdSql.query("show create table jsons1") + # tdSql.checkData(0, 1, 'CREATE TABLE `jsons1` (`ts` TIMESTAMP,`dataint` INT,`databool` BOOL,`datastr` NCHAR(50),`datastrbin` BINARY(150)) TAGS (`jtag` JSON)') + # + # #test aggregate function:count/avg/twa/irate/sum/stddev/leastsquares + # tdSql.query("select count(*) from jsons1 where jtag is not null") + # tdSql.checkData(0, 0, 10) + # tdSql.query("select avg(dataint) from jsons1 where jtag is not null") + # tdSql.checkData(0, 0, 5.3) + # tdSql.error("select twa(dataint) from jsons1 where jtag is not null") + # tdSql.error("select irate(dataint) from jsons1 where jtag is not null") + # tdSql.query("select sum(dataint) from jsons1 where jtag->'tag1' is not null") + # tdSql.checkData(0, 0, 49) + # tdSql.query("select stddev(dataint) from jsons1 where jtag->'tag1'>1") + # tdSql.checkData(0, 0, 4.496912521) + # tdSql.error("SELECT LEASTSQUARES(dataint, 1, 1) from jsons1 where jtag is not null") + # + # #test selection function:min/max/first/last/top/bottom/percentile/apercentile/last_row/interp + # tdSql.query("select min(dataint) from jsons1 where jtag->'tag1'>1") + # tdSql.checkData(0, 0, 1) + # tdSql.query("select max(dataint) from jsons1 where jtag->'tag1'>1") + # tdSql.checkData(0, 0, 11) + # tdSql.query("select 
first(dataint) from jsons1 where jtag->'tag1'>1") + # tdSql.checkData(0, 0, 2) + # tdSql.query("select last(dataint) from jsons1 where jtag->'tag1'>1") + # tdSql.checkData(0, 0, 11) + # tdSql.query("select top(dataint,100) from jsons1 where jtag->'tag1'>1") + # tdSql.checkRows(3) + # tdSql.query("select bottom(dataint,100) from jsons1 where jtag->'tag1'>1") + # tdSql.checkRows(3) + # tdSql.error("select percentile(dataint,20) from jsons1 where jtag->'tag1'>1") + # tdSql.query("select apercentile(dataint, 50) from jsons1 where jtag->'tag1'>1") + # tdSql.checkData(0, 0, 1.5) + # tdSql.query("select last_row(dataint) from jsons1 where jtag->'tag1'>1") + # tdSql.checkData(0, 0, 11) + # tdSql.error("select interp(dataint) from jsons1 where ts = '2020-06-02 09:17:08.000' and jtag->'tag1'>1") + # + # #test calculation function:diff/derivative/spread/ceil/floor/round/ + # tdSql.error("select diff(dataint) from jsons1 where jtag->'tag1'>1") + # tdSql.error("select derivative(dataint, 10m, 0) from jsons1 where jtag->'tag1'>1") + # tdSql.query("select spread(dataint) from jsons1 where jtag->'tag1'>1") + # tdSql.checkData(0, 0, 10) + # tdSql.query("select ceil(dataint) from jsons1 where jtag->'tag1'>1") + # tdSql.checkRows(3) + # tdSql.query("select floor(dataint) from jsons1 where jtag->'tag1'>1") + # tdSql.checkRows(3) + # tdSql.query("select round(dataint) from jsons1 where jtag->'tag1'>1") + # tdSql.checkRows(3) + # + # #test TD-12077 + # tdSql.execute("insert into jsons1_16 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":-2.111}') values(1591062628000, 2, NULL, '你就会', 'dws')") + # tdSql.query("select jtag->'tag3' from jsons1_16") + # tdSql.checkData(0, 0, '-2.111000000') + # + # # test TD-12452 + # tdSql.execute("ALTER TABLE jsons1_1 SET TAG jtag=NULL") + # tdSql.query("select jtag from jsons1_1") + # tdSql.checkData(0, 0, None) + # tdSql.execute("CREATE TABLE if not exists jsons1_20 using jsons1 tags(NULL)") + # tdSql.query("select jtag from jsons1_20") + # tdSql.checkData(0, 0, None) + # tdSql.execute("insert into jsons1_21 using jsons1 tags(NULL) values(1591061628000, 11, false, '你就会','')") + # tdSql.query("select jtag from jsons1_21") + # tdSql.checkData(0, 0, None) + # + # #test TD-12389 + tdSql.query("describe jsons1") + tdSql.checkData(5, 2, 4095) + tdSql.query("describe jsons1_1") + tdSql.checkData(5, 2, 4095) + # + # #test TD-13918 + # tdSql.execute("drop table if exists jsons_13918_1") + # tdSql.execute("drop table if exists jsons_13918_2") + # tdSql.execute("drop table if exists jsons_13918_3") + # tdSql.execute("drop table if exists jsons_13918_4") + # tdSql.execute("drop table if exists jsons_stb") + # tdSql.execute("create table jsons_stb (ts timestamp, dataInt int) tags (jtag json)") + # tdSql.error("create table jsons_13918_1 using jsons_stb tags ('nullx')") + # tdSql.error("create table jsons_13918_2 using jsons_stb tags (nullx)") + # tdSql.error("insert into jsons_13918_3 using jsons_stb tags('NULLx') values(1591061628001, 11)") + # tdSql.error("insert into jsons_13918_4 using jsons_stb tags(NULLx) values(1591061628002, 11)") + # tdSql.execute("create table jsons_13918_1 using jsons_stb tags ('null')") + # tdSql.execute("create table jsons_13918_2 using jsons_stb tags (null)") + # tdSql.execute("insert into jsons_13918_1 values(1591061628003, 11)") + # tdSql.execute("insert into jsons_13918_2 values(1591061628004, 11)") + # tdSql.execute("insert into jsons_13918_3 using jsons_stb tags('NULL') values(1591061628005, 11)") + # tdSql.execute("insert into jsons_13918_4 
using jsons_stb tags(\"NULL\") values(1591061628006, 11)") + # tdSql.query("select * from jsons_stb") + # tdSql.checkRows(4) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + diff --git a/tests/system-test/2-query/last.py b/tests/system-test/2-query/last.py index b491679c627b5bd65c1d4c67ed16b31c792d8a08..4ef13e9142f3a2ebc3ef55f6a2316fd6433908f3 100644 --- a/tests/system-test/2-query/last.py +++ b/tests/system-test/2-query/last.py @@ -170,7 +170,96 @@ class TDTestCase: tdSql.query("select last(col9) from db.stb_1") tdSql.checkRows(1) tdSql.checkData(0, 0, '涛思数据10') + tdSql.query("select last(col1,col2,col3) from stb_1") + tdSql.checkData(0,2,10) + tdSql.query("select last(*) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 10) + tdSql.query("select last(*) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 10) + tdSql.query("select last(col1) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col1) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col2) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col2) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col3) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col3) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col4) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col4) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col11) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col11) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col12) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col12) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col13) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col13) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col14) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col14) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col5) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 9.1) + tdSql.query("select last(col5) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 9.1) + tdSql.query("select last(col6) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 9.1) + tdSql.query("select last(col6) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 9.1) + tdSql.query("select last(col7) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, True) + tdSql.query("select last(col7) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, True) + tdSql.query("select last(col8) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'taosdata10') + tdSql.query("select last(col8) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'taosdata10') + tdSql.query("select last(col9) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, '涛思数据10') + tdSql.query("select last(col9) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, '涛思数据10') + tdSql.query("select last(col1,col2,col3) from stb") + tdSql.checkData(0,2,10) 
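The eight integer-typed columns above are each checked with the same three-line pattern against both stb and db.stb. For reference, an equivalent table-driven form (a sketch only, not part of the patch; it assumes the same expected value 10 recorded in the checks above):

    # Sketch: the repeated last() checks above, expressed as a loop.
    int_like_cols = ["col1", "col2", "col3", "col4", "col11", "col12", "col13", "col14"]
    for col in int_like_cols:
        for table in ("stb", "db.stb"):
            tdSql.query(f"select last({col}) from {table}")
            tdSql.checkRows(1)
            tdSql.checkData(0, 0, 10)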
+ tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') @@ -322,7 +411,12 @@ class TDTestCase: tdSql.query("select last(col9) from db.ntb") tdSql.checkRows(1) tdSql.checkData(0, 0, '涛思数据10') - + tdSql.query("select last(col1,col2,col3) from ntb") + tdSql.checkData(0,2,10) + + tdSql.error("select col1 from stb where last(col9)='涛思数据10'") + tdSql.error("select col1 from ntb where last(col9)='涛思数据10'") + tdSql.error("select col1 from stb_1 where last(col9)='涛思数据10'") def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/system-test/2-query/mavg.py b/tests/system-test/2-query/mavg.py new file mode 100644 index 0000000000000000000000000000000000000000..c8cbd269f9ce2c3ae0e5bd6b0361f0eb4252b1a9 --- /dev/null +++ b/tests/system-test/2-query/mavg.py @@ -0,0 +1,677 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import subprocess +import random +import math +import numpy as np +import inspect +import re +import taos + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def mavg_query_form(self, sel="select", func="mavg(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + ''' + mavg function: + + :param sel: string, must be "select", required parameters; + :param func: string, in this case must be "mavg(", otherwise return other function, required parameters; + :param col: string, column name, required parameters; + :param m_comm: string, comma between col and k , required parameters; + :param k: int/float,the width of the sliding window, [1,100], required parameters; + :param r_comm: string, must be ")", use with "(" in func, required parameters; + :param alias: string, result column another name,or add other funtion; + :param fr: string, must be "from", required parameters; + :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters; + :param condition: expression; + :return: mavg query statement,default: select mavg(c1, 1) from t1 + ''' + + return f"{sel} {func} {col} {m_comm} {k} {r_comm} {alias} {fr} {table_expr} {condition}" + + def checkmavg(self,sel="select", func="mavg(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + # print(self.mavg_query_form(sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition)) + line = sys._getframe().f_back.f_lineno + + if not all([sel , func , col , m_comm , k , r_comm , fr , table_expr]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + 
table_expr=table_expr, condition=condition + )) + + sql = "select * from t1" + collist = tdSql.getColNameList(sql) + + if not isinstance(col, str): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if len([x for x in col.split(",") if x.strip()]) != 1: + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + col = col.replace(",", "").replace(" ", "") + + if any([re.compile('^[a-zA-Z]{1}.*$').match(col) is None , not col.replace(".","").isalnum()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + # if all(["," in col , len(col.split(",")) != 2]): + # print(f"case in {line}: ", end='') + # return tdSql.error(self.mavg_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + # + # if ("," in col): + # if (not col.split(",")[0].strip()) ^ (not col.split(",")[1].strip()): + # col = col.strip().split(",")[0] if not col.split(",")[1].strip() else col.strip().split(",")[1] + # else: + # print(f"case in {line}: ", end='') + # return tdSql.error(self.mavg_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + # pass + + if '.' in col: + if any([col.split(".")[0] not in table_expr, col.split(".")[1] not in collist]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + if "." not in col: + if col not in collist: + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + colname = col if "." 
not in col else col.split(".")[1]
+ col_index = collist.index(colname)
+ if any([tdSql.cursor.istype(col_index, "TIMESTAMP"), tdSql.cursor.istype(col_index, "BOOL")]):
+ print(f"case in {line}: ", end='')
+ return tdSql.error(self.mavg_query_form(
+ sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+ table_expr=table_expr, condition=condition
+ ))
+
+ if any([tdSql.cursor.istype(col_index, "BINARY") , tdSql.cursor.istype(col_index, "NCHAR")]):
+ print(f"case in {line}: ", end='')
+ return tdSql.error(self.mavg_query_form(
+ sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+ table_expr=table_expr, condition=condition
+ ))
+
+ if any([func != "mavg(" , r_comm != ")" , fr != "from", sel != "select"]):
+ print(f"case in {line}: ", end='')
+ return tdSql.error(self.mavg_query_form(
+ sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+ table_expr=table_expr, condition=condition
+ ))
+
+ if all(["(" not in table_expr, "stb" in table_expr, "group" not in condition.lower()]):
+ print(f"case in {line}: ", end='')
+ return tdSql.error(self.mavg_query_form(
+ sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+ table_expr=table_expr, condition=condition
+ ))
+
+ if "order by tbname" in condition.lower():
+ print(f"case in {line}: ", end='')
+ return tdSql.error(self.mavg_query_form(
+ sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+ table_expr=table_expr, condition=condition
+ ))
+
+ if all(["group" in condition.lower(), "tbname" not in condition.lower()]):
+ print(f"case in {line}: ", end='')
+ return tdSql.error(self.mavg_query_form(
+ sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+ table_expr=table_expr, condition=condition
+ ))
+
+ alias_list = ["tbname", "_c0", "st", "ts"]
+ if all([alias, "," not in alias, not alias.isalnum()]):
+ # column aliases may also contain "_", but that is forbidden in this case.
+ print(f"case in {line}: ", end='')
+ return tdSql.error(self.mavg_query_form(
+ sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+ table_expr=table_expr, condition=condition
+ ))
+
+ if all([alias, "," in alias]):
+ if all(parm != alias.lower().split(",")[1].strip() for parm in alias_list):
+ print(f"case in {line}: ", end='')
+ return tdSql.error(self.mavg_query_form(
+ sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+ table_expr=table_expr, condition=condition
+ ))
+ pass
+
+ condition_exception = [ "~", "^", "insert", "distinct",
+ "count", "avg", "twa", "irate", "sum", "stddev", "leastsquares",
+ "min", "max", "first", "last", "top", "bottom", "percentile",
+ "apercentile", "last_row", "interp", "diff", "derivative",
+ "spread", "ceil", "floor", "round", "interval", "fill", "slimit", "soffset"]
+ if "union" not in condition.lower():
+ if any(parm in condition.lower().strip() for parm in condition_exception):
+ print(f"case in {line}: ", end='')
+ return tdSql.error(self.mavg_query_form(
+ sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+ table_expr=table_expr, condition=condition
+ ))
+ pass
+
+ if not any([isinstance(k, int) , isinstance(k, float)]):
+ print(f"case in {line}: ", end='')
+ return tdSql.error(self.mavg_query_form(
+ col=col, k=k, alias=alias, table_expr=table_expr, condition=condition
+ ))
+
+ if not (1 <= k < 1001):
+ print(f"case in {line}: ", end='')
+ return tdSql.error(self.mavg_query_form(
+ col=col, k=k, alias=alias, table_expr=table_expr, condition=condition
+ ))
+
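Once the arguments pass validation, checkmavg floors k and derives the expected rows itself: a width-k moving average is a "valid"-mode convolution with a ones(k) kernel, divided by k. A standalone sketch of that oracle (illustrative values; numpy is already imported in this file):

    import numpy as np

    data = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    k = 2
    expected = np.convolve(data, np.ones(k), "valid") / k
    print(expected)  # [1.5 2.5 3.5 4.5] -- the values checkData() compares below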
+ k = int(k // 1)
+ pre_sql = re.sub(r"mavg\([a-z0-9 .,]*\)", f"count({col})", self.mavg_query_form(
+ col=col, table_expr=table_expr, condition=condition
+ ))
+ tdSql.query(pre_sql)
+
+ if tdSql.queryRows == 0:
+ tdSql.query(self.mavg_query_form(
+ sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+ table_expr=table_expr, condition=condition
+ ))
+ print(f"case in {line}: ", end='')
+ tdSql.checkRows(0)
+ return
+
+ if "group" in condition:
+ tb_condition = condition.split("group by")[1].split(" ")[1]
+ tdSql.query(f"select distinct {tb_condition} from {table_expr}")
+ query_result = tdSql.queryResult
+ query_rows = tdSql.queryRows
+ clear_condition = re.sub('order by [0-9a-z]*|slimit [0-9]*|soffset [0-9]*', "", condition)
+
+ pre_row = 0
+ for i in range(query_rows):
+ group_name = query_result[i][0]
+ if "where" in clear_condition:
+ pre_condition = re.sub('group by [0-9a-z]*', f"{tb_condition}='{group_name}'", clear_condition)
+ else:
+ pre_condition = "where " + re.sub('group by [0-9a-z]*', f"{tb_condition}='{group_name}'", clear_condition)
+
+ tdSql.query(f"select {col} {alias} from {table_expr} {pre_condition}")
+ pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+ pre_mavg = np.convolve(pre_data, np.ones(k), "valid")/k
+ tdSql.query(self.mavg_query_form(
+ sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+ table_expr=table_expr, condition=condition
+ ))
+ for j in range(len(pre_mavg)):
+ print(f"case in {line}: ", end='')
+ tdSql.checkData(pre_row+j, 0, pre_mavg[j])
+ pre_row += len(pre_mavg)
+ return
+
+ elif "union" in condition:
+ union_sql_0 = self.mavg_query_form(
+ sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+ table_expr=table_expr, condition=condition
+ ).split("union all")[0]
+
+ union_sql_1 = self.mavg_query_form(
+ sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+ table_expr=table_expr, condition=condition
+ ).split("union all")[1]
+
+ tdSql.query(union_sql_0)
+ union_mavg_0 = tdSql.queryResult
+ row_union_0 = tdSql.queryRows
+
+ tdSql.query(union_sql_1)
+ union_mavg_1 = tdSql.queryResult
+
+ tdSql.query(self.mavg_query_form(
+ sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+ table_expr=table_expr, condition=condition
+ ))
+ for i in range(tdSql.queryRows):
+ print(f"case in {line}: ", end='')
+ if i < row_union_0:
+ tdSql.checkData(i, 0, union_mavg_0[i][0])
+ else:
+ tdSql.checkData(i, 0, union_mavg_1[i-row_union_0][0])
+ return
+
+ else:
+ tdSql.query(f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}")
+ offset_val = int(condition.split("offset")[1].split(" ")[1]) if "offset" in condition else 0
+ pre_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+ pre_mavg = np.convolve(pre_result, np.ones(k), "valid")[offset_val:]/k
+ tdSql.query(self.mavg_query_form(
+ sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+ table_expr=table_expr, condition=condition
+ ))
+ for i in range(tdSql.queryRows):
+ print(f"case in {line}: ", end='')
+ tdSql.checkData(i, 0, pre_mavg[i])
+
+ pass
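The pre_sql rewrite at the top of the method above swaps the mavg(...) call for count(...), so the test can detect an empty result set before comparing values. A standalone illustration of that substitution (hypothetical query string, same regex as above):

    import re

    sql = "select mavg( c1 , 5 ) from t1 where c1 <= 10"
    pre_sql = re.sub(r"mavg\([a-z0-9 .,]*\)", "count(c1)", sql)
    print(pre_sql)  # select count(c1) from t1 where c1 <= 10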
+
+ def mavg_current_query(self) :
+
+ # table schema: ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool,
+ # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+
+ # case1~6: numeric cols: int/bigint/tinyint/smallint/float/double
+ self.checkmavg()
+ case2 = {"col": "c2"}
+ self.checkmavg(**case2)
+ case3 = {"col": "c5"}
+ self.checkmavg(**case3)
+ case4 = {"col": "c7"}
+ self.checkmavg(**case4)
+ case5 = {"col": "c8"}
+ self.checkmavg(**case5)
+ case6 = {"col": "c9"}
+ self.checkmavg(**case6)
+
+ # # case7~8: nested query
+ # case7 = {"table_expr": "(select c1 from stb1)"}
+ # self.checkmavg(**case7)
+ # case8 = {"table_expr": "(select mavg(c1, 1) c1 from stb1 group by tbname)"}
+ # self.checkmavg(**case8)
+
+ # case9~12: mix with tbname/ts/tag/col
+ # case9 = {"alias": ", tbname"}
+ # self.checkmavg(**case9)
+ # case10 = {"alias": ", _c0"}
+ # self.checkmavg(**case10)
+ # case11 = {"alias": ", st1"}
+ # self.checkmavg(**case11)
+ # case12 = {"alias": ", c1"}
+ # self.checkmavg(**case12)
+
+ # case13~15: with a single condition
+ case13 = {"condition": "where c1 <= 10"}
+ self.checkmavg(**case13)
+ case14 = {"condition": "where c6 in (0, 1)"}
+ self.checkmavg(**case14)
+ case15 = {"condition": "where c1 between 1 and 10"}
+ self.checkmavg(**case15)
+
+ # case16: with multiple conditions
+ case16 = {"condition": "where c6=1 or c6 =0"}
+ self.checkmavg(**case16)
+
+ # case17: join is only supported for normal tables
+ case17 = {
+ "col": "t1.c1",
+ "table_expr": "t1, t2",
+ "condition": "where t1.ts=t2.ts"
+ }
+ self.checkmavg(**case17)
+ # # case18~19: with group by
+ # case19 = {
+ # "table_expr": "stb1",
+ # "condition": "partition by tbname"
+ # }
+ # self.checkmavg(**case19)
+
+ # case20~21: with order by
+ # case20 = {"condition": "order by ts"}
+ # self.checkmavg(**case20)
+ #case21 = {
+ # "table_expr": "stb1",
+ # "condition": "group by tbname order by tbname"
+ #}
+ #self.checkmavg(**case21)
+
+ # # case22: with union
+ # case22 = {
+ # "condition": "union all select mavg( c1 , 1 ) from t2"
+ # }
+ # self.checkmavg(**case22)
+
+ # case23: with limit/slimit
+ case23 = {
+ "condition": "limit 1"
+ }
+ self.checkmavg(**case23)
+
+ # case24~26: k is in range [1, 1000], can be int or float, k = floor(k)
+ case24 = {"k": 3}
+ self.checkmavg(**case24)
+ case25 = {"k": 2.999}
+ self.checkmavg(**case25)
+ case26 = {"k": 1000}
+ self.checkmavg(**case26)
+
+ pass
+
+ def mavg_error_query(self) -> None :
+ # error cases
+
+ # form test
+ err1 = {"col": ""}
+ self.checkmavg(**err1) # no col
+ err2 = {"sel": ""}
+ self.checkmavg(**err2) # no select
+ err3 = {"func": "mavg", "col": "", "m_comm": "", "k": "", "r_comm": ""}
+ self.checkmavg(**err3) # no mavg condition: select mavg from
+ err4 = {"col": "", "m_comm": "", "k": ""}
+ self.checkmavg(**err4) # no mavg condition: select mavg() from
+ err5 = {"func": "mavg", "r_comm": ""}
+ self.checkmavg(**err5) # no brackets: select mavg col, k from
+ err6 = {"fr": ""}
+ self.checkmavg(**err6) # no from
+ err7 = {"k": ""}
+ self.checkmavg(**err7) # no k
+ err8 = {"table_expr": ""}
+ self.checkmavg(**err8) # no table_expr
+
+ # err9 = {"col": "st1"}
+ # self.checkmavg(**err9) # col: tag
+ err10 = {"col": 1}
+ self.checkmavg(**err10) # col: value
+ err11 = {"col": "NULL"}
+ self.checkmavg(**err11) # col: NULL
+ err12 = {"col": "%_"}
+ self.checkmavg(**err12) # col: %_
+ err13 = {"col": "c3"}
+ self.checkmavg(**err13) # col: timestamp col
+ err14 = {"col": "_c0"}
+ self.checkmavg(**err14) # col: primary key
+ err15 = {"col": "avg(c1)"}
+ self.checkmavg(**err15) # expr col
+ err16 = {"col": "c4"}
+ self.checkmavg(**err16) # binary col
+ err17 = {"col": "c10"}
+ self.checkmavg(**err17) # nchar col
+ err18 = {"col": "c6"}
+ self.checkmavg(**err18) # bool col
err19 = {"col": "'c1'"}
+ self.checkmavg(**err19) # col: string
+ err20 = {"col": None}
+ self.checkmavg(**err20) # col: None
+ err21 = {"col": "''"}
+ self.checkmavg(**err21) # col: ''
+ err22 = {"col": "tt1.c1"}
+ self.checkmavg(**err22) # col not in table_expr
+ err23 = {"col": "t1"}
+ self.checkmavg(**err23) # tbname
+ err24 = {"col": "stb1"}
+ self.checkmavg(**err24) # stbname
+ err25 = {"col": "db"}
+ self.checkmavg(**err25) # database name
+ err26 = {"col": "True"}
+ self.checkmavg(**err26) # col: BOOL 1
+ err27 = {"col": True}
+ self.checkmavg(**err27) # col: BOOL 2
+ err28 = {"col": "*"}
+ self.checkmavg(**err28) # col: all cols
+ err29 = {"func": "mavg[", "r_comm": "]"}
+ self.checkmavg(**err29) # form: mavg[col, k]
+ err30 = {"func": "mavg{", "r_comm": "}"}
+ self.checkmavg(**err30) # form: mavg{col, k}
+ err31 = {"col": "[c1]"}
+ self.checkmavg(**err31) # form: mavg([col], k)
+ err32 = {"col": "c1, c2"}
+ self.checkmavg(**err32) # form: mavg(col1, col2, k)
+ err33 = {"col": "c1, 2"}
+ self.checkmavg(**err33) # form: mavg(col, k1, k2)
+ err34 = {"alias": ", count(c1)"}
+ self.checkmavg(**err34) # mix with aggregate function 1
+ err35 = {"alias": ", avg(c1)"}
+ self.checkmavg(**err35) # mix with aggregate function 2
+ err36 = {"alias": ", min(c1)"}
+ self.checkmavg(**err36) # mix with selection function 1
+ err37 = {"alias": ", top(c1, 5)"}
+ self.checkmavg(**err37) # mix with selection function 2
+ err38 = {"alias": ", spread(c1)"}
+ self.checkmavg(**err38) # mix with calculation function 1
+ err39 = {"alias": ", diff(c1)"}
+ self.checkmavg(**err39) # mix with calculation function 2
+ # err40 = {"alias": "+ 2"}
+ # self.checkmavg(**err40) # mix with arithmetic 1
+ #tdSql.query(" select mavg( c1 , 1 ) + 2 from t1 ")
+ err41 = {"alias": "+ avg(c1)"}
+ self.checkmavg(**err41) # mix with arithmetic 2
+ err42 = {"alias": ", c1"}
+ self.checkmavg(**err42) # mix with another col
+ # err43 = {"table_expr": "stb1"}
+ # self.checkmavg(**err43) # select stb directly
+ err44 = {
+ "col": "stb1.c1",
+ "table_expr": "stb1, stb2",
+ "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts"
+ }
+ self.checkmavg(**err44) # stb join
+ err45 = {
+ "condition": "where ts>0 and ts < now interval(1h) fill(next)"
+ }
+ self.checkmavg(**err45) # interval
+ err46 = {
+ "table_expr": "t1",
+ "condition": "group by c6"
+ }
+ self.checkmavg(**err46) # group by normal col
+ err47 = {
+ "table_expr": "stb1",
+ "condition": "group by tbname slimit 1 "
+ }
+ # self.checkmavg(**err47) # with slimit
+ err48 = {
+ "table_expr": "stb1",
+ "condition": "group by tbname slimit 1 soffset 1"
+ }
+ # self.checkmavg(**err48) # with soffset
+ err49 = {"k": "2021-01-01 00:00:00.000"}
+ self.checkmavg(**err49) # k: timestamp
+ err50 = {"k": False}
+ self.checkmavg(**err50) # k: False
+ err51 = {"k": "%"}
+ self.checkmavg(**err51) # k: special char
+ err52 = {"k": ""}
+ self.checkmavg(**err52) # k: ""
+ err53 = {"k": None}
+ self.checkmavg(**err53) # k: None
+ err54 = {"k": "NULL"}
+ self.checkmavg(**err54) # k: null
+ err55 = {"k": "binary(4)"}
+ self.checkmavg(**err55) # k: string
+ err56 = {"k": "c1"}
+ self.checkmavg(**err56) # k: string, col name
+ err57 = {"col": "c1, 1, c2"}
+ self.checkmavg(**err57) # form: mavg(col1, k1, col2, k2)
+ err58 = {"col": "c1 cc1"}
+ self.checkmavg(**err58) # form: mavg(col newname, k)
+ err59 = {"k": "'1'"}
+ # self.checkmavg(**err59) # form: mavg(col, "1")
+ err60 = {"k": "-1-(-2)"}
+ # self.checkmavg(**err60) # form: mavg(col, -1-(-2))
+ err61 = 
err61 = {"k": 1001} + self.checkmavg(**err61) # k: right out of [1, 1000] + err62 = {"k": -1} + self.checkmavg(**err62) # k: negative number + err63 = {"k": 0} + self.checkmavg(**err63) # k: 0 + err64 = {"k": 2**63-1} + self.checkmavg(**err64) # k: max(bigint) + err65 = {"k": 1-2**63} + # self.checkmavg(**err65) # k: min(bigint) + err66 = {"k": -2**63} + self.checkmavg(**err66) # k: NULL + err67 = {"k": 0.999999} + self.checkmavg(**err67) # k: left out of [1, 1000] + err68 = { + "table_expr": "stb1", + "condition": "group by tbname order by tbname" # order by tbname not supported + } + self.checkmavg(**err68) + + pass + + def mavg_test_data(self, tbnum:int, data_row:int, basetime:int) -> None : + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(-200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def mavg_test_table(self,tbnum: int) -> None : + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + pass + + def mavg_test_run(self) : + tdLog.printNoPrefix("==========TD-10594==========") + tbnum = 10 + nowtime = int(round(time.time() * 1000)) + per_table_rows = 2 + self.mavg_test_table(tbnum) + + tdLog.printNoPrefix("######## no data test:") + self.mavg_current_query() + self.mavg_error_query() + + tdLog.printNoPrefix("######## insert only NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + self.mavg_current_query() + self.mavg_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") + # self.mavg_test_table(tbnum) + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + # self.mavg_current_query() + # self.mavg_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") + # self.mavg_test_table(tbnum) + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") + # 
tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") + # self.mavg_current_query() + # self.mavg_error_query() + + tdLog.printNoPrefix("######## insert data without NULL data test:") + self.mavg_test_table(tbnum) + self.mavg_test_data(tbnum, per_table_rows, nowtime) + self.mavg_current_query() + self.mavg_error_query() + + + tdLog.printNoPrefix("######## insert data mix with NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + self.mavg_current_query() + self.mavg_error_query() + + + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + self.mavg_current_query() + self.mavg_error_query() + + def run(self): + import traceback + try: + # run in develop branch + self.mavg_test_run() + pass + except Exception as e: + traceback.print_exc() + raise e + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/2-query/nestedQuery_str.py b/tests/system-test/2-query/nestedQuery_str.py new file mode 100755 index 0000000000000000000000000000000000000000..8214c98c5cc8526874db5f40df22f8e587ea36f4 --- /dev/null +++ b/tests/system-test/2-query/nestedQuery_str.py @@ -0,0 +1,5753 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import random +import os +import time +import taos +import subprocess +from faker import Faker +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes +from util.dnodes import * + +class TDTestCase: + updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , + "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, + "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143} + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.testcasePath = os.path.split(__file__)[0] + self.testcaseFilename = os.path.split(__file__)[-1] + os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) + + self.num = 10 + self.fornum = 5 + + self.db_nest = "nest" + self.dropandcreateDB_random("%s" %self.db_nest, 1) + + # regular column select + #q_select= ['ts' , '*' , 'q_int', 'q_bigint' , 'q_bigint' , 'q_smallint' , 'q_tinyint' , 'q_bool' , 'q_binary' , 'q_nchar' ,'q_float' , 'q_double' ,'q_ts '] + self.q_select= ['ts' , 'q_int', 'q_bigint' , 'q_bigint' , 'q_smallint' , 'q_tinyint' , 'q_bool' , 'q_binary' , 'q_nchar' ,'q_float' , 'q_double' ,'q_ts ', 'q_int_null ', 'q_bigint_null ' , 'q_bigint_null ' , 'q_smallint_null ' , 'q_tinyint_null ' , 'q_bool_null ' , 'q_binary_null ' , 'q_nchar_null ' ,'q_float_null ' , 'q_double_null ' ,'q_ts_null '] + + # tag column select + #t_select= ['*' , 'loc' ,'t_int', 't_bigint' , 't_bigint' , 't_smallint' , 't_tinyint' , 't_bool' , 't_binary' , 't_nchar' ,'t_float' , 't_double' ,'t_ts '] + self.t_select= ['loc','t_int', 't_bigint' , 't_bigint' , 't_smallint' , 't_tinyint' , 't_bool' , 't_binary' , 't_nchar' ,'t_float' , 't_double' ,'t_ts '] + + # regular and tag column select + self.qt_select= self.q_select + self.t_select + + # distinct regular column select + self.dq_select= ['distinct q_int', 'distinct q_bigint' , 'distinct q_smallint' , 'distinct q_tinyint' , + 'distinct q_bool' , 'distinct q_binary' , 'distinct q_nchar' ,'distinct q_float' , 'distinct q_double' ,'distinct q_ts '] + + # distinct tag column select + self.dt_select= ['distinct loc', 'distinct t_int', 'distinct t_bigint' , 'distinct t_smallint' , 'distinct t_tinyint' , + 'distinct t_bool' , 'distinct t_binary' , 'distinct t_nchar' ,'distinct t_float' , 'distinct t_double' ,'distinct t_ts '] + + # distinct regular and tag column select + self.dqt_select= self.dq_select + self.dt_select + + # special column select + self.s_r_select= ['_c0', '_rowts' , '_C0' ] + self.s_s_select= ['tbname' , '_rowts' , '_c0', '_C0' ] + self.unionall_or_union= [ ' union ' , ' union all ' ] + + # regular column where + self.q_where = ['ts < now +1s','q_bigint >= -9223372036854775807 and q_bigint <= 9223372036854775807', 'q_int <= 2147483647 and q_int >= -2147483647', + 'q_smallint >= -32767 and q_smallint <= 32767','q_tinyint >= -127 and q_tinyint <= 127','q_float >= -1.7E308 and q_float <= 1.7E308', + 'q_double >= -1.7E308 and q_double <= 1.7E308', 'q_binary like \'binary%\' or q_binary = 
\'0\' ' , 'q_nchar like \'nchar%\' or q_nchar = \'0\' ' , + 'q_bool = true or q_bool = false' , 'q_bool in (0 , 1)' , 'q_bool in ( true , false)' , 'q_bool = 0 or q_bool = 1', + 'q_bigint between -9223372036854775807 and 9223372036854775807',' q_int between -2147483647 and 2147483647','q_smallint between -32767 and 32767', + 'q_bigint not between 9223372036854775807 and -9223372036854775807','q_int not between 2147483647 and -2147483647','q_smallint not between 32767 and -32767', + 'q_tinyint between -127 and 127 ','q_float >= -3.4E38 ','q_float <= 3.4E38 ','q_double >= -1.7E308 ', + 'q_double <= 1.7E308 ','q_float between -3.4E38 and 3.4E38 ','q_double between -1.7E308 and 1.7E308 ' ,'q_float not between 3.4E38 and -3.4E38 ','q_double not between 1.7E308 and -1.7E308 ', + 'q_float is not null ' ,'q_double is not null ' ,'q_binary match \'binary\' ','q_binary nmatch \'binarynchar\' ','q_nchar match \'nchar\' ','q_nchar nmatch \'binarynchar\' ', + 'q_binary like \'binary%\' ','(q_binary like \'binary%\' or q_nchar = \'0\' or q_binary = \'binary_\' ) ','q_nchar like \'nchar%\' ','(q_nchar like \'nchar%\' or q_binary = \'0\' or q_nchar = \'nchar_\' ) ',] + #TD-6201 ,'q_bool between 0 and 1' + + # regular column where for test union,join + self.q_u_where = ['t1.ts < now +1s' , 't2.ts < now +1s','t1.q_bigint >= -9223372036854775807 and t1.q_bigint <= 9223372036854775807 and t2.q_bigint >= -9223372036854775807 and t2.q_bigint <= 9223372036854775807', + 't1.q_int <= 2147483647 and t1.q_int >= -2147483647 and t2.q_int <= 2147483647 and t2.q_int >= -2147483647', + 't1.q_smallint >= -32767 and t1.q_smallint <= 32767 and t2.q_smallint >= -32767 and t2.q_smallint <= 32767', + 't1.q_tinyint >= -127 and t1.q_tinyint <= 127 and t2.q_tinyint >= -127 and t2.q_tinyint <= 127', + 't1.q_float >= - 1.7E308 and t1.q_float <= 1.7E308 and t2.q_float >= - 1.7E308 and t2.q_float <= 1.7E308', + 't1.q_double >= - 1.7E308 and t1.q_double <= 1.7E308 and t2.q_double >= - 1.7E308 and t2.q_double <= 1.7E308', + 't1.q_binary like \'binary%\' and t2.q_binary like \'binary%\' ' , + 't1.q_nchar like \'nchar%\' and t2.q_nchar like \'nchar%\' ' , + 't1.q_bool in (0 , 1) and t2.q_bool in (0 , 1)' , 't1.q_bool in ( true , false) and t2.q_bool in ( true , false)' , + 't1.q_bigint between -9223372036854775807 and 9223372036854775807 and t2.q_bigint between -9223372036854775807 and 9223372036854775807', + 't1.q_int between -2147483647 and 2147483647 and t2.q_int between -2147483647 and 2147483647', + 't1.q_smallint between -32767 and 32767 and t2.q_smallint between -32767 and 32767', + 't1.q_tinyint between -127 and 127 and t2.q_tinyint between -127 and 127 ','t1.q_float between -1.7E308 and 1.7E308 and t2.q_float between -1.7E308 and 1.7E308', + 't1.q_double between -1.7E308 and 1.7E308 and t2.q_double between -1.7E308 and 1.7E308', + 't1.q_bigint not between 9223372036854775807 and -9223372036854775807 and t2.q_bigint not between 9223372036854775807 and -9223372036854775807', + 't1.q_int not between 2147483647 and -2147483647 and t2.q_int not between 2147483647 and -2147483647', + 't1.q_smallint not between 32767 and -32767 and t2.q_smallint not between 32767 and -32767', + 't1.q_tinyint not between 127 and -127 and t2.q_tinyint not between 127 and -127 ','t1.q_float not between -1.7E308 and -1.7E308 and t2.q_float not between 1.7E308 and -1.7E308', + 't1.q_double not between 1.7E308 and -1.7E308 and t2.q_double not between 1.7E308 and -1.7E308'] + #TD-6201 ,'t1.q_bool between 0 and 1 or t2.q_bool between 0 and 1'] + #'t1.q_bool = 
true and t1.q_bool = false and t2.q_bool = true and t2.q_bool = false' , 't1.q_bool = 0 and t1.q_bool = 1 and t2.q_bool = 0 and t2.q_bool = 1' , + + self.q_u_or_where = ['(t1.q_binary like \'binary%\' or t1.q_binary = \'0\' or t2.q_binary like \'binary%\' or t2.q_binary = \'0\' )' , + '(t1.q_nchar like \'nchar%\' or t1.q_nchar = \'0\' or t2.q_nchar like \'nchar%\' or t2.q_nchar = \'0\' )' , '(t1.q_bool = true or t1.q_bool = false or t2.q_bool = true or t2.q_bool = false)' , + '(t1.q_bool in (0 , 1) or t2.q_bool in (0 , 1))' , '(t1.q_bool in ( true , false) or t2.q_bool in ( true , false))' , '(t1.q_bool = 0 or t1.q_bool = 1 or t2.q_bool = 0 or t2.q_bool = 1)' , + '(t1.q_bigint between -9223372036854775807 and 9223372036854775807 or t2.q_bigint between -9223372036854775807 and 9223372036854775807)', + '(t1.q_int between -2147483647 and 2147483647 or t2.q_int between -2147483647 and 2147483647)', + '(t1.q_smallint between -32767 and 32767 or t2.q_smallint between -32767 and 32767)', + '(t1.q_tinyint between -127 and 127 or t2.q_tinyint between -127 and 127 )','(t1.q_float between -1.7E308 and 1.7E308 or t2.q_float between -1.7E308 and 1.7E308)', + '(t1.q_double between -1.7E308 and 1.7E308 or t2.q_double between -1.7E308 and 1.7E308)'] + + # tag column where + self.t_where = ['ts < now +1s','t_bigint >= -9223372036854775807 and t_bigint <= 9223372036854775807','t_int <= 2147483647 and t_int >= -2147483647', + 't_smallint >= -32767 and t_smallint <= 32767','q_tinyint >= -127 and t_tinyint <= 127','t_float >= -1.7E308 and t_float <= 1.7E308', + 't_double >= -1.7E308 and t_double <= 1.7E308', 't_binary like \'binary%\' or t_binary = \'0\' ' , 't_nchar like \'nchar%\' or t_nchar = \'0\'' , + 't_bool = true or t_bool = false' , 't_bool in (0 , 1)' , 't_bool in ( true , false)' , 't_bool = 0 or t_bool = 1', + 't_bigint between -9223372036854775807 and 9223372036854775807',' t_int between -2147483647 and 2147483647','t_smallint between -32767 and 32767', + 't_tinyint between -127 and 127 ','t_float between -1.7E308 and 1.7E308','t_double between -1.7E308 and 1.7E308', + 't_binary match \'binary\' ','t_binary nmatch \'binarynchar\' ','t_nchar match \'nchar\' ','t_nchar nmatch \'binarynchar\' ', + 't_binary like \'binary%\' ','t_nchar like \'nchar%\' ','(t_binary like \'binary%\' or t_nchar = \'0\' ) ','(t_nchar like \'nchar%\' or t_binary = \'0\' ) ',] + #TD-6201,'t_bool between 0 and 1' + + # tag column where for test union,join | this is not support + self.t_u_where = ['t1.ts < now +1s' , 't2.ts < now +1s','t1.t_bigint >= -9223372036854775807 and t1.t_bigint <= 9223372036854775807 and t2.t_bigint >= -9223372036854775807 and t2.t_bigint <= 9223372036854775807', + 't1.t_int <= 2147483647 and t1.t_int >= -2147483647 and t2.t_int <= 2147483647 and t2.t_int >= -2147483647', + 't1.t_smallint >= -32767 and t1.t_smallint <= 32767 and t2.t_smallint >= -32767 and t2.t_smallint <= 32767', + 't1.t_tinyint >= -127 and t1.t_tinyint <= 127 and t2.t_tinyint >= -127 and t2.t_tinyint <= 127', + 't1.t_float >= -1.7E308 and t1.t_float <= 1.7E308 and t2.t_float >= -1.7E308 and t2.t_float <= 1.7E308', + 't1.t_double >= -1.7E308 and t1.t_double <= 1.7E308 and t2.t_double >= -1.7E308 and t2.t_double <= 1.7E308', + '(t1.t_binary like \'binary%\' or t1.t_binary = \'0\' or t2.t_binary like \'binary%\' or t2.t_binary = \'0\') ' , + '(t1.t_nchar like \'nchar%\' or t1.t_nchar = \'0\' or t2.t_nchar like \'nchar%\' or t2.t_nchar = \'0\' )' , '(t1.t_bool = true or t1.t_bool = false or t2.t_bool = true or t2.t_bool = false)' , + 
't1.t_bool in (0 , 1) and t2.t_bool in (0 , 1)' , 't1.t_bool in ( true , false) and t2.t_bool in ( true , false)' , '(t1.t_bool = 0 or t1.t_bool = 1 or t2.t_bool = 0 or t2.t_bool = 1)', + 't1.t_bigint between -9223372036854775807 and 9223372036854775807 and t2.t_bigint between -9223372036854775807 and 9223372036854775807', + 't1.t_int between -2147483647 and 2147483647 and t2.t_int between -2147483647 and 2147483647', + 't1.t_smallint between -32767 and 32767 and t2.t_smallint between -32767 and 32767', + '(t1.t_tinyint between -127 and 127 and t2.t_tinyint between -127 and 127) ','t1.t_float between -1.7E308 and 1.7E308 and t2.t_float between -1.7E308 and 1.7E308', + '(t1.t_double between -1.7E308 and 1.7E308 and t2.t_double between -1.7E308 and 1.7E308)'] + #TD-6201,'t1.t_bool between 0 and 1 or t2.q_bool between 0 and 1'] + + self.t_u_or_where = ['(t1.t_binary like \'binary%\' or t1.t_binary = \'0\' or t2.t_binary like \'binary%\' or t2.t_binary = \'0\' )' , + '(t1.t_nchar like \'nchar%\' or t1.t_nchar = \'0\' or t2.t_nchar like \'nchar%\' or t2.t_nchar = \'0\' )' , '(t1.t_bool = true or t1.t_bool = false or t2.t_bool = true or t2.t_bool = false)' , + '(t1.t_bool in (0 , 1) or t2.t_bool in (0 , 1))' , '(t1.t_bool in ( true , false) or t2.t_bool in ( true , false))' , '(t1.t_bool = 0 or t1.t_bool = 1 or t2.t_bool = 0 or t2.t_bool = 1)', + '(t1.t_bigint between -9223372036854775807 and 9223372036854775807 or t2.t_bigint between -9223372036854775807 and 9223372036854775807)', + '(t1.t_int between -2147483647 and 2147483647 or t2.t_int between -2147483647 and 2147483647)', + '(t1.t_smallint between -32767 and 32767 or t2.t_smallint between -32767 and 32767)', + '(t1.t_tinyint between -127 and 127 or t2.t_tinyint between -127 and 127 )','(t1.t_float between -1.7E308 and 1.7E308 or t2.t_float between -1.7E308 and 1.7E308)', + '(t1.t_double between -1.7E308 and 1.7E308 or t2.t_double between -1.7E308 and 1.7E308)'] + + # regular and tag column where + self.qt_where = self.q_where + self.t_where + self.qt_u_where = self.q_u_where + self.t_u_where + # for now, qt_u_or_where is not supported + self.qt_u_or_where = self.q_u_or_where + self.t_u_or_where + + # tag column where for test super join | this is supported , whether 't1.t_bool = t2.t_bool ' works is still an open question 
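+ # illustrative only: combined with the t1.ts = t2.ts conditions above, one of these + # fragments yields a super-table join roughly like (table names are assumptions): + # select t1.q_int from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and t1.t_int = t2.t_int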
+ self.t_join_where = ['t1.t_bigint = t2.t_bigint ', 't1.t_int = t2.t_int ', 't1.t_smallint = t2.t_smallint ', 't1.t_tinyint = t2.t_tinyint ', + 't1.t_float = t2.t_float ', 't1.t_double = t2.t_double ', 't1.t_binary = t2.t_binary ' , 't1.t_nchar = t2.t_nchar ' ] + + # session && fill + self.session_where = ['session(ts,10a)' , 'session(ts,10s)', 'session(ts,10m)' , 'session(ts,10h)','session(ts,10d)' , 'session(ts,10w)'] + self.session_u_where = ['session(t1.ts,10a)' , 'session(t1.ts,10s)', 'session(t1.ts,10m)' , 'session(t1.ts,10h)','session(t1.ts,10d)' , 'session(t1.ts,10w)', + 'session(t2.ts,10a)' , 'session(t2.ts,10s)', 'session(t2.ts,10m)' , 'session(t2.ts,10h)','session(t2.ts,10d)' , 'session(t2.ts,10w)'] + + self.fill_where = ['FILL(NONE)','FILL(PREV)','FILL(NULL)','FILL(LINEAR)','FILL(NEXT)','FILL(VALUE, 1.23)'] + + self.state_window = ['STATE_WINDOW(q_tinyint)','STATE_WINDOW(q_bigint)','STATE_WINDOW(q_int)','STATE_WINDOW(q_bool)','STATE_WINDOW(q_smallint)'] + self.state_u_window = ['STATE_WINDOW(t1.q_tinyint)','STATE_WINDOW(t1.q_bigint)','STATE_WINDOW(t1.q_int)','STATE_WINDOW(t1.q_bool)','STATE_WINDOW(t1.q_smallint)', + 'STATE_WINDOW(t2.q_tinyint)','STATE_WINDOW(t2.q_bigint)','STATE_WINDOW(t2.q_int)','STATE_WINDOW(t2.q_bool)','STATE_WINDOW(t2.q_smallint)'] + + # order by where + self.order_where = ['order by ts' , 'order by ts asc'] + self.order_u_where = ['order by t1.ts' , 'order by t1.ts asc' , 'order by t2.ts' , 'order by t2.ts asc'] + self.order_desc_where = ['order by ts' , 'order by ts asc' , 'order by ts desc' ] + self.orders_desc_where = ['order by ts' , 'order by ts asc' , 'order by ts desc' , 'order by loc' , 'order by loc asc' , 'order by loc desc'] + + self.group_where = ['group by tbname , loc' , 'group by tbname', 'group by tbname, t_bigint', 'group by tbname,t_int', 'group by tbname, t_smallint', 'group by tbname,t_tinyint', + 'group by tbname,t_float', 'group by tbname,t_double' , 'group by tbname,t_binary', 'group by tbname,t_nchar', 'group by tbname,t_bool' ,'group by tbname ,loc ,t_bigint', + 'group by tbname,t_binary ,t_nchar ,t_bool' , 'group by tbname,t_int ,t_smallint ,t_tinyint' , 'group by tbname,t_float ,t_double ' , + 'PARTITION BY tbname , loc' , 'PARTITION BY tbname', 'PARTITION BY tbname, t_bigint', 'PARTITION BY tbname,t_int', 'PARTITION BY tbname, t_smallint', 'PARTITION BY tbname,t_tinyint', + 'PARTITION BY tbname,t_float', 'PARTITION BY tbname,t_double' , 'PARTITION BY tbname,t_binary', 'PARTITION BY tbname,t_nchar', 'PARTITION BY tbname,t_bool' ,'PARTITION BY tbname ,loc ,t_bigint', + 'PARTITION BY tbname,t_binary ,t_nchar ,t_bool' , 'PARTITION BY tbname,t_int ,t_smallint ,t_tinyint' , 'PARTITION BY tbname,t_float ,t_double '] + self.group_where_j = ['group by t1.loc' , 'group by t1.t_bigint', 'group by t1.t_int', 'group by t1.t_smallint', 'group by t1.t_tinyint', + 'group by t1.t_float', 'group by t1.t_double' , 'group by t1.t_binary', 'group by t1.t_nchar', 'group by t1.t_bool' ,'group by t1.loc ,t1.t_bigint', + 'group by t1.t_binary ,t1.t_nchar ,t1.t_bool' , 'group by t1.t_int ,t1.t_smallint ,t1.t_tinyint' , 'group by t1.t_float ,t1.t_double ' , + 'PARTITION BY t1.loc' , 'PARTITION by t1.t_bigint', 'PARTITION by t1.t_int', 'PARTITION by t1.t_smallint', 'PARTITION by t1.t_tinyint', + 'PARTITION by t1.t_float', 'PARTITION by t1.t_double' , 'PARTITION by t1.t_binary', 'PARTITION by t1.t_nchar', 'PARTITION by t1.t_bool' ,'PARTITION BY t1.loc ,t1.t_bigint', + 'PARTITION by t1.t_binary ,t1.t_nchar ,t1.t_bool' , 'PARTITION by t1.t_int ,t1.t_smallint 
,t1.t_tinyint' , 'PARTITION by t1.t_float ,t1.t_double ', + 'group by t2.loc' , 'group by t2.t_bigint', 'group by t2.t_int', 'group by t2.t_smallint', 'group by t2.t_tinyint', + 'group by t2.t_float', 'group by t2.t_double' , 'group by t2.t_binary', 'group by t2.t_nchar', 'group by t2.t_bool' ,'group by t2.loc ,t2.t_bigint', + 'group by t2.t_binary ,t2.t_nchar ,t2.t_bool' , 'group by t2.t_int ,t2.t_smallint ,t2.t_tinyint' , 'group by t2.t_float ,t2.t_double ' , + 'PARTITION BY t2.loc' , 'PARTITION by t2.t_bigint', 'PARTITION by t2.t_int', 'PARTITION by t2.t_smallint', 'PARTITION by t2.t_tinyint', + 'PARTITION by t2.t_float', 'PARTITION by t2.t_double' , 'PARTITION by t2.t_binary', 'PARTITION by t2.t_nchar', 'PARTITION by t2.t_bool' ,'PARTITION BY t2.loc ,t2.t_bigint', + 'PARTITION by t2.t_binary ,t2.t_nchar ,t2.t_bool' , 'PARTITION by t2.t_int ,t2.t_smallint ,t2.t_tinyint' , 'PARTITION by t2.t_float ,t2.t_double '] + + self.partiton_where = ['PARTITION BY tbname , loc' , 'PARTITION BY tbname', 'PARTITION BY tbname, t_bigint', 'PARTITION BY tbname,t_int', 'PARTITION BY tbname, t_smallint', 'PARTITION BY tbname,t_tinyint', + 'PARTITION BY tbname,t_float', 'PARTITION BY tbname,t_double' , 'PARTITION BY tbname,t_binary', 'PARTITION BY tbname,t_nchar', 'PARTITION BY tbname,t_bool' ,'PARTITION BY tbname ,loc ,t_bigint', + 'PARTITION BY tbname,t_binary ,t_nchar ,t_bool' , 'PARTITION BY tbname,t_int ,t_smallint ,t_tinyint' , 'PARTITION BY tbname,t_float ,t_double '] + self.partiton_where_j = ['PARTITION BY t1.loc' , 'PARTITION by t1.t_bigint', 'PARTITION by t1.t_int', 'PARTITION by t1.t_smallint', 'PARTITION by t1.t_tinyint', + 'PARTITION by t1.t_float', 'PARTITION by t1.t_double' , 'PARTITION by t1.t_binary', 'PARTITION by t1.t_nchar', 'PARTITION by t1.t_bool' ,'PARTITION BY t1.loc ,t1.t_bigint', + 'PARTITION by t1.t_binary ,t1.t_nchar ,t1.t_bool' , 'PARTITION by t1.t_int ,t1.t_smallint ,t1.t_tinyint' , 'PARTITION by t1.t_float ,t1.t_double ', + 'PARTITION BY t2.loc' , 'PARTITION by t2.t_bigint', 'PARTITION by t2.t_int', 'PARTITION by t2.t_smallint', 'PARTITION by t2.t_tinyint', + 'PARTITION by t2.t_float', 'PARTITION by t2.t_double' , 'PARTITION by t2.t_binary', 'PARTITION by t2.t_nchar', 'PARTITION by t2.t_bool' ,'PARTITION BY t2.loc ,t2.t_bigint', + 'PARTITION by t2.t_binary ,t2.t_nchar ,t2.t_bool' , 'PARTITION by t2.t_int ,t2.t_smallint ,t2.t_tinyint' , 'PARTITION by t2.t_float ,t2.t_double '] + + + self.group_where_regular = ['group by tbname ' , 'group by tbname', 'group by tbname, q_bigint', 'group by tbname,q_int', 'group by tbname, q_smallint', 'group by tbname,q_tinyint', + 'group by tbname,q_float', 'group by tbname,q_double' , 'group by tbname,q_binary', 'group by tbname,q_nchar', 'group by tbname,q_bool' ,'group by tbname ,q_bigint', + 'group by tbname,q_binary ,q_nchar ,q_bool' , 'group by tbname,q_int ,q_smallint ,q_tinyint' , 'group by tbname,q_float ,q_double ' , + 'PARTITION BY tbname ' , 'PARTITION BY tbname', 'PARTITION BY tbname, q_bigint', 'PARTITION BY tbname,q_int', 'PARTITION BY tbname, q_smallint', 'PARTITION BY tbname,q_tinyint', + 'PARTITION BY tbname,q_float', 'PARTITION BY tbname,q_double' , 'PARTITION BY tbname,q_binary', 'PARTITION BY tbname,q_nchar', 'PARTITION BY tbname,q_bool' ,'PARTITION BY tbname ,q_bigint', + 'PARTITION BY tbname,q_binary ,q_nchar ,q_bool' , 'PARTITION BY tbname,q_int ,q_smallint ,q_tinyint' , 'PARTITION BY tbname,q_float ,q_double '] + self.group_where_regular_j = ['group by t1.q_bigint', 'group by t1.q_int', 'group by t1.q_smallint', 'group 
by t1.q_tinyint', + 'group by t1.q_float', 'group by t1.q_double' , 'group by t1.q_binary', 'group by t1.q_nchar', 'group by t1.q_bool' ,'group by t1.q_bigint', + 'group by t1.q_binary ,t1.q_nchar ,t1.q_bool' , 'group by t1.q_int ,t1.q_smallint ,t1.q_tinyint' , 'group by t1.q_float ,t1.q_double ' , + 'PARTITION by t1.q_bigint', 'PARTITION by t1.q_int', 'PARTITION by t1.q_smallint', 'PARTITION by t1.q_tinyint', + 'PARTITION by t1.q_float', 'PARTITION by t1.q_double' , 'PARTITION by t1.q_binary', 'PARTITION by t1.q_nchar', 'PARTITION by t1.q_bool' ,'PARTITION BY t1.q_bigint', + 'PARTITION by t1.q_binary ,t1.q_nchar ,t1.q_bool' , 'PARTITION by t1.q_int ,t1.q_smallint ,t1.q_tinyint' , 'PARTITION by t1.q_float ,t1.q_double ', + 'group by t2.q_bigint', 'group by t2.q_int', 'group by t2.q_smallint', 'group by t2.q_tinyint', + 'group by t2.q_float', 'group by t2.q_double' , 'group by t2.q_binary', 'group by t2.q_nchar', 'group by t2.q_bool' ,'group by t2.q_bigint', + 'group by t2.q_binary ,t2.q_nchar ,t2.q_bool' , 'group by t2.q_int ,t2.q_smallint ,t2.q_tinyint' , 'group by t2.q_float ,t2.q_double ' , + 'PARTITION by t2.q_bigint', 'PARTITION by t2.q_int', 'PARTITION by t2.q_smallint', 'PARTITION by t2.q_tinyint', + 'PARTITION by t2.q_float', 'PARTITION by t2.q_double' , 'PARTITION by t2.q_binary', 'PARTITION by t2.q_nchar', 'PARTITION by t2.q_bool' ,'PARTITION BY t2.q_bigint', + 'PARTITION by t2.q_binary ,t2.q_nchar ,t2.q_bool' , 'PARTITION by t2.q_int ,t2.q_smallint ,t2.q_tinyint' , 'PARTITION by t2.q_float ,t2.q_double '] + + self.partiton_where_regular = ['PARTITION BY tbname ' , 'PARTITION BY tbname', 'PARTITION BY tbname, q_bigint', 'PARTITION BY tbname,q_int', 'PARTITION BY tbname, q_smallint', 'PARTITION BY tbname,q_tinyint', + 'PARTITION BY tbname,q_float', 'PARTITION BY tbname,q_double' , 'PARTITION BY tbname,q_binary', 'PARTITION BY tbname,q_nchar', 'PARTITION BY tbname,q_bool' ,'PARTITION BY tbname ,q_bigint', + 'PARTITION BY tbname,q_binary ,q_nchar ,q_bool' , 'PARTITION BY tbname,q_int ,q_smallint ,q_tinyint' , 'PARTITION BY tbname,q_float ,q_double '] + self.partiton_where_regular_j = ['PARTITION by t1.q_bigint', 'PARTITION by t1.q_int', 'PARTITION by t1.q_smallint', 'PARTITION by t1.q_tinyint', + 'PARTITION by t1.q_float', 'PARTITION by t1.q_double' , 'PARTITION by t1.q_binary', 'PARTITION by t1.q_nchar', 'PARTITION by t1.q_bool' ,'PARTITION BY t1.q_bigint', + 'PARTITION by t1.q_binary ,t1.q_nchar ,t1.q_bool' , 'PARTITION by t1.q_int ,t1.q_smallint ,t1.q_tinyint' , 'PARTITION by t1.q_float ,t1.q_double ', + 'PARTITION by t2.q_bigint', 'PARTITION by t2.q_int', 'PARTITION by t2.q_smallint', 'PARTITION by t2.q_tinyint', + 'PARTITION by t2.q_float', 'PARTITION by t2.q_double' , 'PARTITION by t2.q_binary', 'PARTITION by t2.q_nchar', 'PARTITION by t2.q_bool' ,'PARTITION BY t2.q_bigint', + 'PARTITION by t2.q_binary ,t2.q_nchar ,t2.q_bool' , 'PARTITION by t2.q_int ,t2.q_smallint ,t2.q_tinyint' , 'PARTITION by t2.q_float ,t2.q_double '] + + self.having_support = ['having count(q_int) > 0','having count(q_bigint) > 0','having count(q_smallint) > 0','having count(q_tinyint) > 0','having count(q_float) > 0','having count(q_double) > 0','having count(q_bool) > 0', + 'having avg(q_int) > 0','having avg(q_bigint) > 0','having avg(q_smallint) > 0','having avg(q_tinyint) > 0','having avg(q_float) > 0','having avg(q_double) > 0', + 'having sum(q_int) > 0','having sum(q_bigint) > 0','having sum(q_smallint) > 0','having sum(q_tinyint) > 0','having sum(q_float) > 0','having sum(q_double) > 0', + 'having 
STDDEV(q_int) > 0','having STDDEV(q_bigint) > 0','having STDDEV(q_smallint) > 0','having STDDEV(q_tinyint) > 0','having STDDEV(q_float) > 0','having STDDEV(q_double) > 0', + 'having TWA(q_int) > 0','having TWA(q_bigint) > 0','having TWA(q_smallint) > 0','having TWA(q_tinyint) > 0','having TWA(q_float) > 0','having TWA(q_double) > 0', + 'having IRATE(q_int) > 0','having IRATE(q_bigint) > 0','having IRATE(q_smallint) > 0','having IRATE(q_tinyint) > 0','having IRATE(q_float) > 0','having IRATE(q_double) > 0', + 'having MIN(q_int) > 0','having MIN(q_bigint) > 0','having MIN(q_smallint) > 0','having MIN(q_tinyint) > 0','having MIN(q_float) > 0','having MIN(q_double) > 0', + 'having MAX(q_int) > 0','having MAX(q_bigint) > 0','having MAX(q_smallint) > 0','having MAX(q_tinyint) > 0','having MAX(q_float) > 0','having MAX(q_double) > 0', + 'having FIRST(q_int) > 0','having FIRST(q_bigint) > 0','having FIRST(q_smallint) > 0','having FIRST(q_tinyint) > 0','having FIRST(q_float) > 0','having FIRST(q_double) > 0', + 'having LAST(q_int) > 0','having LAST(q_bigint) > 0','having LAST(q_smallint) > 0','having LAST(q_tinyint) > 0','having LAST(q_float) > 0','having LAST(q_double) > 0', + 'having APERCENTILE(q_int,10) > 0','having APERCENTILE(q_bigint,10) > 0','having APERCENTILE(q_smallint,10) > 0','having APERCENTILE(q_tinyint,10) > 0','having APERCENTILE(q_float,10) > 0','having APERCENTILE(q_double,10) > 0'] + self.having_not_support = ['having TOP(q_int,10) > 0','having TOP(q_bigint,10) > 0','having TOP(q_smallint,10) > 0','having TOP(q_tinyint,10) > 0','having TOP(q_float,10) > 0','having TOP(q_double,10) > 0','having TOP(q_bool,10) > 0', + 'having BOTTOM(q_int,10) > 0','having BOTTOM(q_bigint,10) > 0','having BOTTOM(q_smallint,10) > 0','having BOTTOM(q_tinyint,10) > 0','having BOTTOM(q_float,10) > 0','having BOTTOM(q_double,10) > 0','having BOTTOM(q_bool,10) > 0', + 'having LEASTSQUARES(q_int) > 0','having LEASTSQUARES(q_bigint) > 0','having LEASTSQUARES(q_smallint) > 0','having LEASTSQUARES(q_tinyint) > 0','having LEASTSQUARES(q_float) > 0','having LEASTSQUARES(q_double) > 0','having LEASTSQUARES(q_bool) > 0', + 'having FIRST(q_bool) > 0','having IRATE(q_bool) > 0','having PERCENTILE(q_bool,10) > 0','having avg(q_bool) > 0','having LAST_ROW(q_bool) > 0','having sum(q_bool) > 0','having STDDEV(q_bool) > 0','having APERCENTILE(q_bool,10) > 0','having TWA(q_bool) > 0','having LAST(q_bool) > 0', + 'having PERCENTILE(q_int,10) > 0','having PERCENTILE(q_bigint,10) > 0','having PERCENTILE(q_smallint,10) > 0','having PERCENTILE(q_tinyint,10) > 0','having PERCENTILE(q_float,10) > 0','having PERCENTILE(q_double,10) > 0'] + self.having_tagnot_support = ['having LAST_ROW(q_int) > 0','having LAST_ROW(q_bigint) > 0','having LAST_ROW(q_smallint) > 0','having LAST_ROW(q_tinyint) > 0','having LAST_ROW(q_float) > 0','having LAST_ROW(q_double) > 0'] + + self.having_support_j = ['having count(t1.q_int) > 0','having count(t1.q_bigint) > 0','having count(t1.q_smallint) > 0','having count(t1.q_tinyint) > 0','having count(t1.q_float) > 0','having count(t1.q_double) > 0','having count(t1.q_bool) > 0', + 'having avg(t1.q_int) > 0','having avg(t1.q_bigint) > 0','having avg(t1.q_smallint) > 0','having avg(t1.q_tinyint) > 0','having avg(t1.q_float) > 0','having avg(t1.q_double) > 0', + 'having sum(t1.q_int) > 0','having sum(t1.q_bigint) > 0','having sum(t1.q_smallint) > 0','having sum(t1.q_tinyint) > 0','having sum(t1.q_float) > 0','having sum(t1.q_double) > 0', + 'having STDDEV(t1.q_int) > 0','having STDDEV(t1.q_bigint) > 
0','having STDDEV(t1.q_smallint) > 0','having STDDEV(t1.q_tinyint) > 0','having STDDEV(t1.q_float) > 0','having STDDEV(t1.q_double) > 0', + 'having TWA(t1.q_int) > 0','having TWA(t1.q_bigint) > 0','having TWA(t1.q_smallint) > 0','having TWA(t1.q_tinyint) > 0','having TWA(t1.q_float) > 0','having TWA(t1.q_double) > 0', + 'having IRATE(t1.q_int) > 0','having IRATE(t1.q_bigint) > 0','having IRATE(t1.q_smallint) > 0','having IRATE(t1.q_tinyint) > 0','having IRATE(t1.q_float) > 0','having IRATE(t1.q_double) > 0', + 'having MIN(t1.q_int) > 0','having MIN(t1.q_bigint) > 0','having MIN(t1.q_smallint) > 0','having MIN(t1.q_tinyint) > 0','having MIN(t1.q_float) > 0','having MIN(t1.q_double) > 0', + 'having MAX(t1.q_int) > 0','having MAX(t1.q_bigint) > 0','having MAX(t1.q_smallint) > 0','having MAX(t1.q_tinyint) > 0','having MAX(t1.q_float) > 0','having MAX(t1.q_double) > 0', + 'having FIRST(t1.q_int) > 0','having FIRST(t1.q_bigint) > 0','having FIRST(t1.q_smallint) > 0','having FIRST(t1.q_tinyint) > 0','having FIRST(t1.q_float) > 0','having FIRST(t1.q_double) > 0', + 'having LAST(t1.q_int) > 0','having LAST(t1.q_bigint) > 0','having LAST(t1.q_smallint) > 0','having LAST(t1.q_tinyint) > 0','having LAST(t1.q_float) > 0','having LAST(t1.q_double) > 0', + 'having APERCENTILE(t1.q_int,10) > 0','having APERCENTILE(t1.q_bigint,10) > 0','having APERCENTILE(t1.q_smallint,10) > 0','having APERCENTILE(t1.q_tinyint,10) > 0','having APERCENTILE(t1.q_float,10) > 0','having APERCENTILE(t1.q_double,10) > 0'] + + # limit offset where + self.limit_where = ['limit 1 offset 1' , 'limit 1' , 'limit 2 offset 1' , 'limit 2', 'limit 12 offset 1' , 'limit 20', 'limit 20 offset 10' , 'limit 200'] + self.limit1_where = ['limit 1 offset 1' , 'limit 1' ] + self.limit_u_where = ['limit 100 offset 10' , 'limit 50' , 'limit 100' , 'limit 10' ] + + # slimit soffset where + self.slimit_where = ['slimit 1 soffset 1' , 'slimit 1' , 'slimit 2 soffset 1' , 'slimit 2'] + self.slimit1_where = ['slimit 2 soffset 1' , 'slimit 1' ] + + # aggregate function include [all:count(*)\avg\sum\stddev ||regular:twa\irate\leastsquares ||group by tbname:twa\irate\] + # select function include [all: min\max\first(*)\last(*)\top\bottom\apercentile\last_row(*)(not with interval)\interp(*)(FILL) ||regular: percentile] + # calculation function include [all:spread\+-*/ ||regular:diff\derivative ||group by tbname:diff\derivative\] + # **_ns_** means not supported on super tables, therefore kept separate from regular tables + # calc_select_all calc_select_regular calc_select_in_ts calc_select_fill calc_select_not_interval + # calc_aggregate_all calc_aggregate_regular calc_aggregate_groupbytbname + # calc_calculate_all calc_calculate_regular calc_calculate_groupbytbname + + # calc_select_all calc_select_regular calc_select_in_ts calc_select_fill calc_select_not_interval + # select function include [all: min\max\first(*)\last(*)\top\bottom\apercentile\last_row(*)(not with interval)\interp(*)(FILL) ||regular: percentile] + + self.calc_select_all = ['bottom(q_int,20)' , 'bottom(q_bigint,20)' , 'bottom(q_smallint,20)' , 'bottom(q_tinyint,20)' ,'bottom(q_float,20)' , 'bottom(q_double,20)' , + 'top(q_int,20)' , 'top(q_bigint,20)' , 'top(q_smallint,20)' ,'top(q_tinyint,20)' ,'top(q_float,20)' ,'top(q_double,20)' , + 'first(q_int)' , 'first(q_bigint)' , 'first(q_smallint)' , 'first(q_tinyint)' , 'first(q_float)' ,'first(q_double)' ,'first(q_binary)' ,'first(q_nchar)' ,'first(q_bool)' ,'first(q_ts)' , + 'last(q_int)' , 'last(q_bigint)' , 'last(q_smallint)' , 'last(q_tinyint)' , 
'last(q_float)' ,'last(q_double)' , 'last(q_binary)' ,'last(q_nchar)' ,'last(q_bool)' ,'last(q_ts)' , + 'min(q_int)' , 'min(q_bigint)' , 'min(q_smallint)' , 'min(q_tinyint)' , 'min(q_float)' ,'min(q_double)' , + 'max(q_int)' , 'max(q_bigint)' , 'max(q_smallint)' , 'max(q_tinyint)' ,'max(q_float)' ,'max(q_double)' , + 'apercentile(q_int,20)' , 'apercentile(q_bigint,20)' ,'apercentile(q_smallint,20)' ,'apercentile(q_tinyint,20)' ,'apercentile(q_float,20)' ,'apercentile(q_double,20)' , + 'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' , + 'last_row(q_double)' , 'last_row(q_bool)' ,'last_row(q_binary)' ,'last_row(q_nchar)' ,'last_row(q_ts)'] + + self.calc_select_in_ts = ['bottom(q_int,20)' , 'bottom(q_bigint,20)' , 'bottom(q_smallint,20)' , 'bottom(q_tinyint,20)' ,'bottom(q_float,20)' , 'bottom(q_double,20)' , + 'top(q_int,20)' , 'top(q_bigint,20)' , 'top(q_smallint,20)' ,'top(q_tinyint,20)' ,'top(q_float,20)' ,'top(q_double,20)' , + 'first(q_int)' , 'first(q_bigint)' , 'first(q_smallint)' , 'first(q_tinyint)' , 'first(q_float)' ,'first(q_double)' ,'first(q_binary)' ,'first(q_nchar)' ,'first(q_bool)' ,'first(q_ts)' , + 'last(q_int)' , 'last(q_bigint)' , 'last(q_smallint)' , 'last(q_tinyint)' , 'last(q_float)' ,'last(q_double)' , 'last(q_binary)' ,'last(q_nchar)' ,'last(q_bool)' ,'last(q_ts)' ] + + self.calc_select_in = ['min(q_int)' , 'min(q_bigint)' , 'min(q_smallint)' , 'min(q_tinyint)' , 'min(q_float)' ,'min(q_double)' , + 'max(q_int)' , 'max(q_bigint)' , 'max(q_smallint)' , 'max(q_tinyint)' ,'max(q_float)' ,'max(q_double)' , + 'apercentile(q_int,20)' , 'apercentile(q_bigint,20)' ,'apercentile(q_smallint,20)' ,'apercentile(q_tinyint,20)' ,'apercentile(q_float,20)' ,'apercentile(q_double,20)' , + 'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' , + 'last_row(q_double)' , 'last_row(q_bool)' ,'last_row(q_binary)' ,'last_row(q_nchar)' ,'last_row(q_ts)'] + + self.calc_select_not_support_ts = ['first(q_int)' , 'first(q_bigint)' , 'first(q_smallint)' , 'first(q_tinyint)' , 'first(q_float)' ,'first(q_double)' ,'first(q_binary)' ,'first(q_nchar)' ,'first(q_bool)' ,'first(q_ts)' , + 'last(q_int)' , 'last(q_bigint)' , 'last(q_smallint)' , 'last(q_tinyint)' , 'last(q_float)' ,'last(q_double)' , 'last(q_binary)' ,'last(q_nchar)' ,'last(q_bool)' ,'last(q_ts)' , + 'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' , + 'last_row(q_double)' , 'last_row(q_bool)' ,'last_row(q_binary)' ,'last_row(q_nchar)' ,'last_row(q_ts)', + 'apercentile(q_int,20)' , 'apercentile(q_bigint,20)' ,'apercentile(q_smallint,20)' ,'apercentile(q_tinyint,20)' ,'apercentile(q_float,20)' ,'apercentile(q_double,20)'] + + self.calc_select_support_ts = ['bottom(q_int,20)' , 'bottom(q_bigint,20)' , 'bottom(q_smallint,20)' , 'bottom(q_tinyint,20)' ,'bottom(q_float,20)' , 'bottom(q_double,20)' , + 'top(q_int,20)' , 'top(q_bigint,20)' , 'top(q_smallint,20)' ,'top(q_tinyint,20)' ,'top(q_float,20)' ,'top(q_double,20)' , + 'min(q_int)' , 'min(q_bigint)' , 'min(q_smallint)' , 'min(q_tinyint)' , 'min(q_float)' ,'min(q_double)' , + 'max(q_int)' , 'max(q_bigint)' , 'max(q_smallint)' , 'max(q_tinyint)' ,'max(q_float)' ,'max(q_double)' ] + + self.calc_select_regular = [ 'PERCENTILE(q_int,10)' ,'PERCENTILE(q_bigint,20)' , 'PERCENTILE(q_smallint,30)' ,'PERCENTILE(q_tinyint,40)' ,'PERCENTILE(q_float,50)' ,'PERCENTILE(q_double,60)'] + + + self.calc_select_fill = 
['INTERP(q_int)' ,'INTERP(q_bigint)' ,'INTERP(q_smallint)' ,'INTERP(q_tinyint)', 'INTERP(q_float)' ,'INTERP(q_double)'] + self.interp_where = ['ts = now' , 'ts = \'2020-09-13 20:26:40.000\'' , 'ts = \'2020-09-13 20:26:40.009\'' ,'tbname in (\'table_1\') and ts = now' ,'tbname in (\'table_0\' ,\'table_1\',\'table_2\',\'table_3\',\'table_4\',\'table_5\') and ts = \'2020-09-13 20:26:40.000\'','tbname like \'table%\' and ts = \'2020-09-13 20:26:40.002\''] + + #two table join + self.calc_select_in_ts_j = ['bottom(t1.q_int,20)' , 'bottom(t1.q_bigint,20)' , 'bottom(t1.q_smallint,20)' , 'bottom(t1.q_tinyint,20)' ,'bottom(t1.q_float,20)' , 'bottom(t1.q_double,20)' , + 'top(t1.q_int,20)' , 'top(t1.q_bigint,20)' , 'top(t1.q_smallint,20)' ,'top(t1.q_tinyint,20)' ,'top(t1.q_float,20)' ,'top(t1.q_double,20)' , + 'first(t1.q_int)' , 'first(t1.q_bigint)' , 'first(t1.q_smallint)' , 'first(t1.q_tinyint)' , 'first(t1.q_float)' ,'first(t1.q_double)' ,'first(t1.q_binary)' ,'first(t1.q_nchar)' ,'first(t1.q_bool)' ,'first(t1.q_ts)' , + 'last(t1.q_int)' , 'last(t1.q_bigint)' , 'last(t1.q_smallint)' , 'last(t1.q_tinyint)' , 'last(t1.q_float)' ,'last(t1.q_double)' , 'last(t1.q_binary)' ,'last(t1.q_nchar)' ,'last(t1.q_bool)' ,'last(t1.q_ts)' , + 'bottom(t2.q_int,20)' , 'bottom(t2.q_bigint,20)' , 'bottom(t2.q_smallint,20)' , 'bottom(t2.q_tinyint,20)' ,'bottom(t2.q_float,20)' , 'bottom(t2.q_double,20)' , + 'top(t2.q_int,20)' , 'top(t2.q_bigint,20)' , 'top(t2.q_smallint,20)' ,'top(t2.q_tinyint,20)' ,'top(t2.q_float,20)' ,'top(t2.q_double,20)' , + 'first(t2.q_int)' , 'first(t2.q_bigint)' , 'first(t2.q_smallint)' , 'first(t2.q_tinyint)' , 'first(t2.q_float)' ,'first(t2.q_double)' ,'first(t2.q_binary)' ,'first(t2.q_nchar)' ,'first(t2.q_bool)' ,'first(t2.q_ts)' , + 'last(t2.q_int)' , 'last(t2.q_bigint)' , 'last(t2.q_smallint)' , 'last(t2.q_tinyint)' , 'last(t2.q_float)' ,'last(t2.q_double)' , 'last(t2.q_binary)' ,'last(t2.q_nchar)' ,'last(t2.q_bool)' ,'last(t2.q_ts)'] + + self.calc_select_in_support_ts_j = ['bottom(t1.q_int,20)' , 'bottom(t1.q_bigint,20)' , 'bottom(t1.q_smallint,20)' , 'bottom(t1.q_tinyint,20)' ,'bottom(t1.q_float,20)' , 'bottom(t1.q_double,20)' , + 'top(t1.q_int,20)' , 'top(t1.q_bigint,20)' , 'top(t1.q_smallint,20)' ,'top(t1.q_tinyint,20)' ,'top(t1.q_float,20)' ,'top(t1.q_double,20)' , + 'min(t1.q_int)' , 'min(t1.q_bigint)' , 'min(t1.q_smallint)' , 'min(t1.q_tinyint)' , 'min(t1.q_float)' ,'min(t1.q_double)' , + 'max(t1.q_int)' , 'max(t1.q_bigint)' , 'max(t1.q_smallint)' , 'max(t1.q_tinyint)' ,'max(t1.q_float)' ,'max(t1.q_double)' , + 'bottom(t2.q_int,20)' , 'bottom(t2.q_bigint,20)' , 'bottom(t2.q_smallint,20)' , 'bottom(t2.q_tinyint,20)' ,'bottom(t2.q_float,20)' , 'bottom(t2.q_double,20)' , + 'top(t2.q_int,20)' , 'top(t2.q_bigint,20)' , 'top(t2.q_smallint,20)' ,'top(t2.q_tinyint,20)' ,'top(t2.q_float,20)' ,'top(t2.q_double,20)' , + 'min(t2.q_int)' , 'min(t2.q_bigint)' , 'min(t2.q_smallint)' , 'min(t2.q_tinyint)' , 'min(t2.q_float)' ,'min(t2.q_double)' , + 'max(t2.q_int)' , 'max(t2.q_bigint)' , 'max(t2.q_smallint)' , 'max(t2.q_tinyint)' ,'max(t2.q_float)' ,'max(t2.q_double)' , + ] + + self.calc_select_in_not_support_ts_j = ['apercentile(t1.q_int,20)' , 'apercentile(t1.q_bigint,20)' ,'apercentile(t1.q_smallint,20)' ,'apercentile(t1.q_tinyint,20)' ,'apercentile(t1.q_float,20)' ,'apercentile(t1.q_double,20)' , + 'last_row(t1.q_int)' , 'last_row(t1.q_bigint)' , 'last_row(t1.q_smallint)' , 'last_row(t1.q_tinyint)' , 'last_row(t1.q_float)' , + 'last_row(t1.q_double)' , 'last_row(t1.q_bool)' 
,'last_row(t1.q_binary)' ,'last_row(t1.q_nchar)' ,'last_row(t1.q_ts)' , + 'apercentile(t2.q_int,20)' , 'apercentile(t2.q_bigint,20)' ,'apercentile(t2.q_smallint,20)' ,'apercentile(t2.q_tinyint,20)' ,'apercentile(t2.q_float,20)' ,'apercentile(t2.q_double,20)' , + 'last_row(t2.q_int)' , 'last_row(t2.q_bigint)' , 'last_row(t2.q_smallint)' , 'last_row(t2.q_tinyint)' , 'last_row(t2.q_float)' , + 'last_row(t2.q_double)' , 'last_row(t2.q_bool)' ,'last_row(t2.q_binary)' ,'last_row(t2.q_nchar)' ,'last_row(t2.q_ts)'] + + self.calc_select_in_j = ['min(t1.q_int)' , 'min(t1.q_bigint)' , 'min(t1.q_smallint)' , 'min(t1.q_tinyint)' , 'min(t1.q_float)' ,'min(t1.q_double)' , + 'max(t1.q_int)' , 'max(t1.q_bigint)' , 'max(t1.q_smallint)' , 'max(t1.q_tinyint)' ,'max(t1.q_float)' ,'max(t1.q_double)' , + 'apercentile(t1.q_int,20)' , 'apercentile(t1.q_bigint,20)' ,'apercentile(t1.q_smallint,20)' ,'apercentile(t1.q_tinyint,20)' ,'apercentile(t1.q_float,20)' ,'apercentile(t1.q_double,20)' , + 'last_row(t1.q_int)' , 'last_row(t1.q_bigint)' , 'last_row(t1.q_smallint)' , 'last_row(t1.q_tinyint)' , 'last_row(t1.q_float)' , + 'last_row(t1.q_double)' , 'last_row(t1.q_bool)' ,'last_row(t1.q_binary)' ,'last_row(t1.q_nchar)' ,'last_row(t1.q_ts)' , + 'min(t2.q_int)' , 'min(t2.q_bigint)' , 'min(t2.q_smallint)' , 'min(t2.q_tinyint)' , 'min(t2.q_float)' ,'min(t2.q_double)' , + 'max(t2.q_int)' , 'max(t2.q_bigint)' , 'max(t2.q_smallint)' , 'max(t2.q_tinyint)' ,'max(t2.q_float)' ,'max(t2.q_double)' , + 'apercentile(t2.q_int,20)' , 'apercentile(t2.q_bigint,20)' ,'apercentile(t2.q_smallint,20)' ,'apercentile(t2.q_tinyint,20)' ,'apercentile(t2.q_float,20)' ,'apercentile(t2.q_double,20)' , + 'last_row(t2.q_int)' , 'last_row(t2.q_bigint)' , 'last_row(t2.q_smallint)' , 'last_row(t2.q_tinyint)' , 'last_row(t2.q_float)' , + 'last_row(t2.q_double)' , 'last_row(t2.q_bool)' ,'last_row(t2.q_binary)' ,'last_row(t2.q_nchar)' ,'last_row(t2.q_ts)'] + self.calc_select_all_j = self.calc_select_in_ts_j + self.calc_select_in_j + + self.calc_select_regular_j = [ 'PERCENTILE(t1.q_int,10)' ,'PERCENTILE(t1.q_bigint,20)' , 'PERCENTILE(t1.q_smallint,30)' ,'PERCENTILE(t1.q_tinyint,40)' ,'PERCENTILE(t1.q_float,50)' ,'PERCENTILE(t1.q_double,60)' , + 'PERCENTILE(t2.q_int,10)' ,'PERCENTILE(t2.q_bigint,20)' , 'PERCENTILE(t2.q_smallint,30)' ,'PERCENTILE(t2.q_tinyint,40)' ,'PERCENTILE(t2.q_float,50)' ,'PERCENTILE(t2.q_double,60)'] + + + self.calc_select_fill_j = ['INTERP(t1.q_int)' ,'INTERP(t1.q_bigint)' ,'INTERP(t1.q_smallint)' ,'INTERP(t1.q_tinyint)', 'INTERP(t1.q_float)' ,'INTERP(t1.q_double)' , + 'INTERP(t2.q_int)' ,'INTERP(t2.q_bigint)' ,'INTERP(t2.q_smallint)' ,'INTERP(t2.q_tinyint)', 'INTERP(t2.q_float)' ,'INTERP(t2.q_double)'] + self.interp_where_j = ['t1.ts = now' , 't1.ts = \'2020-09-13 20:26:40.000\'' , 't1.ts = \'2020-09-13 20:26:40.009\'' ,'t2.ts = now' , 't2.ts = \'2020-09-13 20:26:40.000\'' , 't2.ts = \'2020-09-13 20:26:40.009\'' , + 't1.tbname in (\'table_1\') and t1.ts = now' ,'t1.tbname in (\'table_0\' ,\'table_1\',\'table_2\',\'table_3\',\'table_4\',\'table_5\') and t1.ts = \'2020-09-13 20:26:40.000\'','t1.tbname like \'table%\' and t1.ts = \'2020-09-13 20:26:40.002\'', + 't2.tbname in (\'table_1\') and t2.ts = now' ,'t2.tbname in (\'table_0\' ,\'table_1\',\'table_2\',\'table_3\',\'table_4\',\'table_5\') and t2.ts = \'2020-09-13 20:26:40.000\'','t2.tbname like \'table%\' and t2.ts = \'2020-09-13 20:26:40.002\''] + + # calc_aggregate_all calc_aggregate_regular calc_aggregate_groupbytbname APERCENTILE\PERCENTILE + # aggregate function include 
[all:count(*)\avg\sum\stddev ||regualr:twa\irate\leastsquares ||group by tbname:twa\irate\] + self.calc_aggregate_all = ['count(*)' , 'count(q_int)' ,'count(q_bigint)' , 'count(q_smallint)' ,'count(q_tinyint)' ,'count(q_float)' , + 'count(q_double)' ,'count(q_binary)' ,'count(q_nchar)' ,'count(q_bool)' ,'count(q_ts)' , + 'avg(q_int)' ,'avg(q_bigint)' , 'avg(q_smallint)' ,'avg(q_tinyint)' ,'avg(q_float)' ,'avg(q_double)' , + 'sum(q_int)' ,'sum(q_bigint)' , 'sum(q_smallint)' ,'sum(q_tinyint)' ,'sum(q_float)' ,'sum(q_double)' , + 'STDDEV(q_int)' ,'STDDEV(q_bigint)' , 'STDDEV(q_smallint)' ,'STDDEV(q_tinyint)' ,'STDDEV(q_float)' ,'STDDEV(q_double)', + 'APERCENTILE(q_int,10)' ,'APERCENTILE(q_bigint,20)' , 'APERCENTILE(q_smallint,30)' ,'APERCENTILE(q_tinyint,40)' ,'APERCENTILE(q_float,50)' ,'APERCENTILE(q_double,60)'] + + self.calc_aggregate_regular = ['twa(q_int)' ,'twa(q_bigint)' , 'twa(q_smallint)' ,'twa(q_tinyint)' ,'twa (q_float)' ,'twa(q_double)' , + 'IRATE(q_int)' ,'IRATE(q_bigint)' , 'IRATE(q_smallint)' ,'IRATE(q_tinyint)' ,'IRATE (q_float)' ,'IRATE(q_double)' , + 'LEASTSQUARES(q_int,15,3)' , 'LEASTSQUARES(q_bigint,10,1)' , 'LEASTSQUARES(q_smallint,20,3)' ,'LEASTSQUARES(q_tinyint,10,4)' ,'LEASTSQUARES(q_float,6,4)' ,'LEASTSQUARES(q_double,3,1)' , + 'PERCENTILE(q_int,10)' ,'PERCENTILE(q_bigint,20)' , 'PERCENTILE(q_smallint,30)' ,'PERCENTILE(q_tinyint,40)' ,'PERCENTILE(q_float,50)' ,'PERCENTILE(q_double,60)'] + + self.calc_aggregate_groupbytbname = ['twa(q_int)' ,'twa(q_bigint)' , 'twa(q_smallint)' ,'twa(q_tinyint)' ,'twa (q_float)' ,'twa(q_double)' , + 'IRATE(q_int)' ,'IRATE(q_bigint)' , 'IRATE(q_smallint)' ,'IRATE(q_tinyint)' ,'IRATE (q_float)' ,'IRATE(q_double)' ] + + #two table join + self.calc_aggregate_all_j = ['count(t1.*)' , 'count(t1.q_int)' ,'count(t1.q_bigint)' , 'count(t1.q_smallint)' ,'count(t1.q_tinyint)' ,'count(t1.q_float)' , + 'count(t1.q_double)' ,'count(t1.q_binary)' ,'count(t1.q_nchar)' ,'count(t1.q_bool)' ,'count(t1.q_ts)' , + 'avg(t1.q_int)' ,'avg(t1.q_bigint)' , 'avg(t1.q_smallint)' ,'avg(t1.q_tinyint)' ,'avg(t1.q_float)' ,'avg(t1.q_double)' , + 'sum(t1.q_int)' ,'sum(t1.q_bigint)' , 'sum(t1.q_smallint)' ,'sum(t1.q_tinyint)' ,'sum(t1.q_float)' ,'sum(t1.q_double)' , + 'STDDEV(t1.q_int)' ,'STDDEV(t1.q_bigint)' , 'STDDEV(t1.q_smallint)' ,'STDDEV(t1.q_tinyint)' ,'STDDEV(t1.q_float)' ,'STDDEV(t1.q_double)', + 'APERCENTILE(t1.q_int,10)' ,'APERCENTILE(t1.q_bigint,20)' , 'APERCENTILE(t1.q_smallint,30)' ,'APERCENTILE(t1.q_tinyint,40)' ,'APERCENTILE(t1.q_float,50)' ,'APERCENTILE(t1.q_double,60)' , + 'count(t2.*)' , 'count(t2.q_int)' ,'count(t2.q_bigint)' , 'count(t2.q_smallint)' ,'count(t2.q_tinyint)' ,'count(t2.q_float)' , + 'count(t2.q_double)' ,'count(t2.q_binary)' ,'count(t2.q_nchar)' ,'count(t2.q_bool)' ,'count(t2.q_ts)' , + 'avg(t2.q_int)' ,'avg(t2.q_bigint)' , 'avg(t2.q_smallint)' ,'avg(t2.q_tinyint)' ,'avg(t2.q_float)' ,'avg(t2.q_double)' , + 'sum(t2.q_int)' ,'sum(t2.q_bigint)' , 'sum(t2.q_smallint)' ,'sum(t2.q_tinyint)' ,'sum(t2.q_float)' ,'sum(t2.q_double)' , + 'STDDEV(t2.q_int)' ,'STDDEV(t2.q_bigint)' , 'STDDEV(t2.q_smallint)' ,'STDDEV(t2.q_tinyint)' ,'STDDEV(t2.q_float)' ,'STDDEV(t2.q_double)', + 'APERCENTILE(t2.q_int,10)' ,'APERCENTILE(t2.q_bigint,20)' , 'APERCENTILE(t2.q_smallint,30)' ,'APERCENTILE(t2.q_tinyint,40)' ,'APERCENTILE(t2.q_float,50)' ,'APERCENTILE(t2.q_double,60)'] + + self.calc_aggregate_regular_j = ['twa(t1.q_int)' ,'twa(t1.q_bigint)' , 'twa(t1.q_smallint)' ,'twa(t1.q_tinyint)' ,'twa (t1.q_float)' ,'twa(t1.q_double)' , + 'IRATE(t1.q_int)' 
,'IRATE(t1.q_bigint)' , 'IRATE(t1.q_smallint)' ,'IRATE(t1.q_tinyint)' ,'IRATE (t1.q_float)' ,'IRATE(t1.q_double)' , + 'LEASTSQUARES(t1.q_int,15,3)' , 'LEASTSQUARES(t1.q_bigint,10,1)' , 'LEASTSQUARES(t1.q_smallint,20,3)' ,'LEASTSQUARES(t1.q_tinyint,10,4)' ,'LEASTSQUARES(t1.q_float,6,4)' ,'LEASTSQUARES(t1.q_double,3,1)' , + 'PERCENTILE(t1.q_int,10)' ,'PERCENTILE(t1.q_bigint,20)' , 'PERCENTILE(t1.q_smallint,30)' ,'PERCENTILE(t1.q_tinyint,40)' ,'PERCENTILE(t1.q_float,50)' ,'PERCENTILE(t1.q_double,60)' , + 'twa(t2.q_int)' ,'twa(t2.q_bigint)' , 'twa(t2.q_smallint)' ,'twa(t2.q_tinyint)' ,'twa (t2.q_float)' ,'twa(t2.q_double)' , + 'IRATE(t2.q_int)' ,'IRATE(t2.q_bigint)' , 'IRATE(t2.q_smallint)' ,'IRATE(t2.q_tinyint)' ,'IRATE (t2.q_float)' ,'IRATE(t2.q_double)', + 'LEASTSQUARES(t2.q_int,15,3)' , 'LEASTSQUARES(t2.q_bigint,10,1)' , 'LEASTSQUARES(t2.q_smallint,20,3)' ,'LEASTSQUARES(t2.q_tinyint,10,4)' ,'LEASTSQUARES(t2.q_float,6,4)' ,'LEASTSQUARES(t2.q_double,3,1)' , + 'PERCENTILE(t2.q_int,10)' ,'PERCENTILE(t2.q_bigint,20)' , 'PERCENTILE(t2.q_smallint,30)' ,'PERCENTILE(t2.q_tinyint,40)' ,'PERCENTILE(t2.q_float,50)' ,'PERCENTILE(t2.q_double,60)'] + + self.calc_aggregate_groupbytbname_j = ['twa(t1.q_int)' ,'twa(t1.q_bigint)' , 'twa(t1.q_smallint)' ,'twa(t1.q_tinyint)' ,'twa (t1.q_float)' ,'twa(t1.q_double)' , + 'IRATE(t1.q_int)' ,'IRATE(t1.q_bigint)' , 'IRATE(t1.q_smallint)' ,'IRATE(t1.q_tinyint)' ,'IRATE (t1.q_float)' ,'IRATE(t1.q_double)' , + 'twa(t2.q_int)' ,'twa(t2.q_bigint)' , 'twa(t2.q_smallint)' ,'twa(t2.q_tinyint)' ,'twa (t2.q_float)' ,'twa(t2.q_double)' , + 'IRATE(t2.q_int)' ,'IRATE(t2.q_bigint)' , 'IRATE(t2.q_smallint)' ,'IRATE(t2.q_tinyint)' ,'IRATE (t2.q_float)' ,'IRATE(t2.q_double)' ] + + # calc_calculate_all calc_calculate_regular calc_calculate_groupbytbname + # calculation function include [all:spread\+-*/ ||regualr:diff\derivative ||group by tbname:diff\derivative\] + self.calc_calculate_all = ['SPREAD(ts)' , 'SPREAD(q_ts)' , 'SPREAD(q_int)' ,'SPREAD(q_bigint)' , 'SPREAD(q_smallint)' ,'SPREAD(q_tinyint)' ,'SPREAD(q_float)' ,'SPREAD(q_double)' , + '(SPREAD(q_int) + SPREAD(q_bigint))' , '(SPREAD(q_smallint) - SPREAD(q_float))', '(SPREAD(q_double) * SPREAD(q_tinyint))' , '(SPREAD(q_double) / SPREAD(q_float))'] + self.calc_calculate_regular = ['DIFF(q_int)' ,'DIFF(q_bigint)' , 'DIFF(q_smallint)' ,'DIFF(q_tinyint)' ,'DIFF(q_float)' ,'DIFF(q_double)' , + 'DIFF(q_int,0)' ,'DIFF(q_bigint,0)' , 'DIFF(q_smallint,0)' ,'DIFF(q_tinyint,0)' ,'DIFF(q_float,0)' ,'DIFF(q_double,0)' , + 'DIFF(q_int,1)' ,'DIFF(q_bigint,1)' , 'DIFF(q_smallint,1)' ,'DIFF(q_tinyint,1)' ,'DIFF(q_float,1)' ,'DIFF(q_double,1)' , + 'DERIVATIVE(q_int,15s,0)' , 'DERIVATIVE(q_bigint,10s,1)' , 'DERIVATIVE(q_smallint,20s,0)' ,'DERIVATIVE(q_tinyint,10s,1)' ,'DERIVATIVE(q_float,6s,0)' ,'DERIVATIVE(q_double,3s,1)' ] + self.calc_calculate_groupbytbname = self.calc_calculate_regular + + #two table join + self.calc_calculate_all_j = ['SPREAD(t1.ts)' , 'SPREAD(t1.q_ts)' , 'SPREAD(t1.q_int)' ,'SPREAD(t1.q_bigint)' , 'SPREAD(t1.q_smallint)' ,'SPREAD(t1.q_tinyint)' ,'SPREAD(t1.q_float)' ,'SPREAD(t1.q_double)' , + 'SPREAD(t2.ts)' , 'SPREAD(t2.q_ts)' , 'SPREAD(t2.q_int)' ,'SPREAD(t2.q_bigint)' , 'SPREAD(t2.q_smallint)' ,'SPREAD(t2.q_tinyint)' ,'SPREAD(t2.q_float)' ,'SPREAD(t2.q_double)' , + '(SPREAD(t1.q_int) + SPREAD(t1.q_bigint))' , '(SPREAD(t1.q_tinyint) - SPREAD(t1.q_float))', '(SPREAD(t1.q_double) * SPREAD(t1.q_tinyint))' , '(SPREAD(t1.q_double) / SPREAD(t1.q_tinyint))', + '(SPREAD(t2.q_int) + SPREAD(t2.q_bigint))' , '(SPREAD(t2.q_smallint) 
- SPREAD(t2.q_float))', '(SPREAD(t2.q_double) * SPREAD(t2.q_tinyint))' , '(SPREAD(t2.q_double) / SPREAD(t2.q_tinyint))', + '(SPREAD(t1.q_int) + SPREAD(t1.q_smallint))' , '(SPREAD(t2.q_smallint) - SPREAD(t2.q_float))', '(SPREAD(t1.q_double) * SPREAD(t1.q_tinyint))' , '(SPREAD(t1.q_double) / SPREAD(t1.q_float))'] + self.calc_calculate_regular_j = ['DIFF(t1.q_int)' ,'DIFF(t1.q_bigint)' , 'DIFF(t1.q_smallint)' ,'DIFF(t1.q_tinyint)' ,'DIFF(t1.q_float)' ,'DIFF(t1.q_double)' , + 'DIFF(t1.q_int,0)' ,'DIFF(t1.q_bigint,0)' , 'DIFF(t1.q_smallint,0)' ,'DIFF(t1.q_tinyint,0)' ,'DIFF(t1.q_float,0)' ,'DIFF(t1.q_double,0)' , + 'DIFF(t1.q_int,1)' ,'DIFF(t1.q_bigint,1)' , 'DIFF(t1.q_smallint,1)' ,'DIFF(t1.q_tinyint,1)' ,'DIFF(t1.q_float,1)' ,'DIFF(t1.q_double,1)' , + 'DERIVATIVE(t1.q_int,15s,0)' , 'DERIVATIVE(t1.q_bigint,10s,1)' , 'DERIVATIVE(t1.q_smallint,20s,0)' ,'DERIVATIVE(t1.q_tinyint,10s,1)' ,'DERIVATIVE(t1.q_float,6s,0)' ,'DERIVATIVE(t1.q_double,3s,1)' , + 'DIFF(t2.q_int)' ,'DIFF(t2.q_bigint)' , 'DIFF(t2.q_smallint)' ,'DIFF(t2.q_tinyint)' ,'DIFF(t2.q_float)' ,'DIFF(t2.q_double)' , + 'DIFF(t2.q_int,0)' ,'DIFF(t2.q_bigint,0)' , 'DIFF(t2.q_smallint,0)' ,'DIFF(t2.q_tinyint,0)' ,'DIFF(t2.q_float,0)' ,'DIFF(t2.q_double,0)' , + 'DIFF(t2.q_int,1)' ,'DIFF(t2.q_bigint,1)' , 'DIFF(t2.q_smallint,1)' ,'DIFF(t2.q_tinyint,1)' ,'DIFF(t2.q_float,1)' ,'DIFF(t2.q_double,1)' , + 'DERIVATIVE(t2.q_int,15s,0)' , 'DERIVATIVE(t2.q_bigint,10s,1)' , 'DERIVATIVE(t2.q_smallint,20s,0)' ,'DERIVATIVE(t2.q_tinyint,10s,1)' ,'DERIVATIVE(t2.q_float,6s,0)' ,'DERIVATIVE(t2.q_double,3s,1)' ] + self.calc_calculate_groupbytbname_j = self.calc_calculate_regular_j + + #interval && calc_aggregate_all\calc_aggregate_regular\calc_select_all + self.interval_sliding = ['interval(4w) sliding(1w) ','interval(1w) sliding(1d) ','interval(1d) sliding(1h) ' , + 'interval(1h) sliding(1m) ','interval(1m) sliding(1s) ','interval(1s) sliding(10a) ', + 'interval(1y) ','interval(1n) ','interval(1w) ','interval(1d) ','interval(1h) ','interval(1m) ','interval(1s) ' ,'interval(10a)', + 'interval(1y,1n) ','interval(1n,1w) ','interval(1w,1d) ','interval(1d,1h) ','interval(1h,1m) ','interval(1m,1s) ','interval(1s,10a) ' ,'interval(100a,30a)'] + + self.conn1 = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos/") + self.cur1 = self.conn1.cursor() + print(self.cur1) + self.cur1.execute("use %s ;" %self.db_nest) + sql = 'select * from stable_1 limit 5;' + self.cur1.execute(sql) + + + def data_matrix_equal(self, sql1,row1_s,row1_e,col1_s,col1_e, sql2,row2_s,row2_e,col2_s,col2_e): + # ----row1_start----col1_start---- + # - - - the data inside this matrix must be equal - - - + # - - - - - - - - - - - - - - - - + # ----row1_end------col1_end------ + self.sql1 = sql1 + list1 =[] + tdSql.query(sql1) + for i1 in range(row1_s-1,row1_e): + #print("iiii=%d"%i1) + for j1 in range(col1_s-1,col1_e): + #print("jjjj=%d"%j1) + #print("data=%s" %(tdSql.getData(i1,j1))) + list1.append(tdSql.getData(i1,j1)) + print("=====list1-------list1---=%s" %set(list1)) + + tdSql.execute("reset query cache;") + self.sql2 = sql2 + list2 =[] + tdSql.query(sql2) + for i2 in range(row2_s-1,row2_e): + #print("iiii222=%d"%i2) + for j2 in range(col2_s-1,col2_e): + #print("jjjj222=%d"%j2) + #print("data=%s" %(tdSql.getData(i2,j2))) + list2.append(tdSql.getData(i2,j2)) + print("=====list2-------list2---=%s" %set(list2)) + + if (list1 == list2) and len(list2)>0: + # print(("=====matrix===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2)) + tdLog.info(("===matrix===sql1:'%s' matrix_result = sql2:'%s' matrix_result") %(sql1,sql2)) +
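# fallback ladder below (descriptive comments only): the set-subset branch tolerates + # subtable ordering differences; the remaining branches flatten both result lists into + # a single numeric string (datetime, bracket and sign characters stripped) and accept + # widening absolute tolerances of 0.0001, 0.1 and 0.5 before declaring a mismatch +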
matrix_result") %(sql1,sql2)) + elif (set(list2)).issubset(set(list1)): + # 解决不同子表排列结果乱序 + # print(("=====list_issubset==matrix2in1-true===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2)) + tdLog.info(("===matrix_issubset===sql1:'%s' matrix_set_result = sql2:'%s' matrix_set_result") %(sql1,sql2)) + #elif abs(float(str(list1).replace("]","").replace("[","").replace("e+","")) - float(str(list2).replace("]","").replace("[","").replace("e+",""))) <= 0.0001: + elif abs(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace("e+","").replace(", ","").replace("(","").replace(")","").replace("-","")) - float(str(list2).replace("datetime.datetime","").replace("]","").replace("[","").replace("e+","").replace(", ","").replace("(","").replace(")","").replace("-",""))) <= 0.0001: + print(("=====matrix_abs+e+===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2)) + print(("=====matrix_abs+e+replace_after===sql1.list1:'%s',sql2.list2:'%s'") %(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace("e+","").replace(", ","").replace("(","").replace(")","").replace("-","")),float(str(list2).replace("datetime.datetime","").replace("]","").replace("[","").replace("e+","").replace(", ","").replace("(","").replace(")","").replace("-","")))) + tdLog.info(("===matrix_abs+e+===sql1:'%s' matrix_result = sql2:'%s' matrix_result") %(sql1,sql2)) + elif abs(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")) - float(str(list2).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-",""))) <= 0.1: + #{datetime.datetime(2021, 8, 27, 1, 46, 40), -441.46841430664057}replace + print(("=====matrix_abs+replace===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2)) + print(("=====matrix_abs+replace_after===sql1.list1:'%s',sql2.list2:'%s'") %(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")),float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")))) + tdLog.info(("===matrix_abs+replace===sql1:'%s' matrix_result = sql2:'%s' matrix_result") %(sql1,sql2)) + elif abs(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")) - float(str(list2).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-",""))) <= 0.5: + print(("=====matrix_abs===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2)) + print(("=====matrix_abs===sql1.list1:'%s',sql2.list2:'%s'") %(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")),float(str(list2).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")))) + tdLog.info(("===matrix_abs======sql1:'%s' matrix_result = sql2:'%s' matrix_result") %(sql1,sql2)) + else: + print(("=====matrix_error===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2)) + tdLog.info(("sql1:'%s' matrix_result != sql2:'%s' matrix_result") %(sql1,sql2)) + return tdSql.checkEqual(list1,list2) + + def restartDnodes(self): + pass + # tdDnodes.stop(1) + # tdDnodes.start(1) + + def dropandcreateDB_random(self,database,n): + ts 
= 1630000000000 + num_random = 100 + fake = Faker('zh_CN') + tdSql.execute('''drop database if exists %s ;''' %database) + tdSql.execute('''create database %s keep 36500;'''%database) + tdSql.execute('''use %s;'''%database) + + tdSql.execute('''create stable stable_1 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ + q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\ + q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\ + q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \ + tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''') + tdSql.execute('''create stable stable_2 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ + q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\ + q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\ + q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \ + tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''') + + tdSql.execute('''create stable stable_null_data (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ + q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\ + q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\ + q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \ + tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''') + + tdSql.execute('''create stable stable_null_childtable (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double 
, q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ + q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\ + q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\ + q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \ + tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''') + + #tdSql.execute('''create table stable_1_1 using stable_1 tags('stable_1_1', '0' , '0' , '0' , '0' , 0 , 'binary1' , 'nchar1' , '0' , '0' ,'0') ;''') + tdSql.execute('''create table stable_1_1 using stable_1 tags('stable_1_1', '%d' , '%d', '%d' , '%d' , 0 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;''' + %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1), + fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , + fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1))) + tdSql.execute('''create table stable_1_2 using stable_1 tags('stable_1_2', '2147483647' , '9223372036854775807' , '32767' , '127' , 1 , 'binary2' , 'nchar2' , '2' , '22' , \'1999-09-09 09:09:09.090\') ;''') + tdSql.execute('''create table stable_1_3 using stable_1 tags('stable_1_3', '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false , 'binary3' , 'nchar3nchar3' , '-3.3' , '-33.33' , \'2099-09-09 09:09:09.090\') ;''') + #tdSql.execute('''create table stable_1_4 using stable_1 tags('stable_1_4', '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0') ;''') + tdSql.execute('''create table stable_1_4 using stable_1 tags('stable_1_4', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;''' + %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1), + fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , + fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1))) + + # tdSql.execute('''create table stable_2_1 using stable_2 tags('stable_2_1' , '0' , '0' , '0' , '0' , 0 , 'binary21' , 'nchar21' , '0' , '0' ,'0') ;''') + # tdSql.execute('''create table stable_2_2 using stable_2 tags('stable_2_2' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0') ;''') + + # tdSql.execute('''create table stable_null_data_1 using stable_null_data tags('stable_null_data_1', '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0') ;''') + + tdSql.execute('''create table stable_2_1 using stable_2 tags('stable_2_1' , '0' , '0' , '0' , '0' , 0 , 'binary21' , 'nchar21' , '0' , '0' ,\'2099-09-09 09:09:09.090\') ;''') + tdSql.execute('''create table stable_2_2 using stable_2 tags('stable_2_2' , '%d' , '%d', '%d' , '%d' , 0 , 'binary2.%s' , 'nchar2.%s' , '%f', '%f' ,'%d') ;''' + %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, 
max=9223372036854775807, step=1), + fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , + fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1))) + + tdSql.execute('''create table stable_null_data_1 using stable_null_data tags('stable_null_data_1', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;''' + %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1), + fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , + fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1))) + + #regular table + tdSql.execute('''create table regular_table_1 \ + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ + q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\ + q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\ + q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;''') + tdSql.execute('''create table regular_table_2 \ + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ + q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\ + q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\ + q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;''') + tdSql.execute('''create table regular_table_3 \ + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ + q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\ + q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\ + q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;''') + + tdSql.execute('''create table regular_table_null \ + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool 
bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ + q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\ + q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\ + q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;''') + + + for i in range(num_random*n): + tdSql.execute('''insert into stable_1_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double , q_bool , q_binary , q_nchar, q_ts,\ + q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ + values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000, fake.random_int(min=-2147483647, max=2147483647, step=1), + fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1), + fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) + tdSql.execute('''insert into regular_table_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ + q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ + values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000, fake.random_int(min=-2147483647, max=2147483647, step=1) , + fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1) , + fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) + + tdSql.execute('''insert into stable_1_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ + q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8)\ + values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 
'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000, fake.random_int(min=0, max=2147483647, step=1), + fake.random_int(min=0, max=9223372036854775807, step=1), + fake.random_int(min=0, max=32767, step=1) , fake.random_int(min=0, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) + tdSql.execute('''insert into regular_table_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ + q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ + values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000, fake.random_int(min=0, max=2147483647, step=1), + fake.random_int(min=0, max=9223372036854775807, step=1), + fake.random_int(min=0, max=32767, step=1) , fake.random_int(min=0, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) + + tdSql.execute('''insert into stable_1_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ + q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ + values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000 +1, fake.random_int(min=-2147483647, max=0, step=1), + fake.random_int(min=-9223372036854775807, max=0, step=1), + fake.random_int(min=-32767, max=0, step=1) , fake.random_int(min=-127, max=0, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i +1, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) + tdSql.execute('''insert into regular_table_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ + q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ + values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 
'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000 +1, fake.random_int(min=-2147483647, max=0, step=1), + fake.random_int(min=-9223372036854775807, max=0, step=1), + fake.random_int(min=-32767, max=0, step=1) , fake.random_int(min=-127, max=0, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i +1, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) + + tdSql.execute('''insert into stable_2_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ + q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ + values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000, fake.random_int(min=-0, max=2147483647, step=1), + fake.random_int(min=-0, max=9223372036854775807, step=1), + fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) + + tdSql.execute('''insert into stable_2_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ + q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ + values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000 +1, fake.random_int(min=-0, max=2147483647, step=1), + fake.random_int(min=-0, max=9223372036854775807, step=1), + fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) + + tdSql.execute('''insert into stable_2_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ + q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ + values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 
'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000 +10, fake.random_int(min=-0, max=2147483647, step=1), + fake.random_int(min=-0, max=9223372036854775807, step=1), + fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) + + tdSql.query("select count(*) from stable_1;") + tdSql.checkData(0,0,3*num_random*n) + tdSql.query("select count(*) from regular_table_1;") + tdSql.checkData(0,0,num_random*n) + + def math_nest(self,mathlist): + + print("==========%s===start=============" %mathlist) + os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) + + self.dropandcreateDB_random("%s" %self.db_nest, 1) + + if (mathlist == ['ABS','SQRT']) or (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['FLOOR','CEIL','ROUND']) \ + or (mathlist == ['CSUM']) or (mathlist == ['']): + math_functions = mathlist + fun_fix_column = ['(q_bigint)','(q_smallint)','(q_tinyint)','(q_int)','(q_float)','(q_double)','(q_bigint_null)','(q_smallint_null)','(q_tinyint_null)','(q_int_null)','(q_float_null)','(q_double_null)'] + fun_column_1 = random.sample(math_functions,1)+random.sample(fun_fix_column,1) + math_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","") + fun_column_2 = random.sample(math_functions,1)+random.sample(fun_fix_column,1) + math_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + fun_fix_column_j = ['(t1.q_bigint)','(t1.q_smallint)','(t1.q_tinyint)','(t1.q_int)','(t1.q_float)','(t1.q_double)','(t1.q_bigint_null)','(t1.q_smallint_null)','(t1.q_tinyint_null)','(t1.q_int_null)','(t1.q_float_null)','(t1.q_double_null)', + '(t2.q_bigint)','(t2.q_smallint)','(t2.q_tinyint)','(t2.q_int)','(t2.q_float)','(t2.q_double)','(t2.q_bigint_null)','(t2.q_smallint_null)','(t2.q_tinyint_null)','(t2.q_int_null)','(t2.q_float_null)','(t2.q_double_null)'] + fun_column_join_1 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1) + math_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","") + fun_column_join_2 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1) + math_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + elif (mathlist == ['UNIQUE']) or (mathlist == ['HYPERLOGLOG']): + math_functions = mathlist + fun_fix_column = ['(q_bigint)','(q_smallint)','(q_tinyint)','(q_int)','(q_float)','(q_double)','(q_binary)','(q_nchar)','(q_bool)','(q_ts)', + '(q_bigint_null)','(q_smallint_null)','(q_tinyint_null)','(q_int_null)','(q_float_null)','(q_double_null)','(q_binary_null)','(q_nchar_null)','(q_bool_null)','(q_ts_null)'] + fun_column_1 = random.sample(math_functions,1)+random.sample(fun_fix_column,1) + math_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","") + fun_column_2 = random.sample(math_functions,1)+random.sample(fun_fix_column,1) + math_fun_2 = 
str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + fun_fix_column_j = ['(t1.q_bigint)','(t1.q_smallint)','(t1.q_tinyint)','(t1.q_int)','(t1.q_float)','(t1.q_double)','(t1.q_bigint_null)','(t1.q_smallint_null)','(t1.q_tinyint_null)','(t1.q_int_null)','(t1.q_float_null)','(t1.q_double_null)', + '(t2.q_bigint)','(t2.q_smallint)','(t2.q_tinyint)','(t2.q_int)','(t2.q_float)','(t2.q_double)','(t2.q_bigint_null)','(t2.q_smallint_null)','(t2.q_tinyint_null)','(t2.q_int_null)','(t2.q_float_null)','(t2.q_double_null)'] + fun_column_join_1 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1) + math_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","") + fun_column_join_2 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1) + math_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + elif (mathlist == ['POW','LOG']) or (mathlist == ['MAVG']) or (mathlist == ['SAMPLE']) or (mathlist == ['TAIL']): + math_functions = mathlist + num = random.randint(0, 1000) + fun_fix_column = ['(q_bigint,num)','(q_smallint,num)','(q_tinyint,num)','(q_int,num)','(q_float,num)','(q_double,num)', + '(q_bigint_null,num)','(q_smallint_null,num)','(q_tinyint_null,num)','(q_int_null,num)','(q_float_null,num)','(q_double_null,num)'] + fun_column_1 = random.sample(math_functions,1)+random.sample(fun_fix_column,1) + math_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num)) + fun_column_2 = random.sample(math_functions,1)+random.sample(fun_fix_column,1) + math_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num)) + + fun_fix_column_j = ['(t1.q_bigint,num)','(t1.q_smallint,num)','(t1.q_tinyint,num)','(t1.q_int,num)','(t1.q_float,num)','(t1.q_double,num)', + '(t1.q_bigint_null,num)','(t1.q_smallint_null,num)','(t1.q_tinyint_null,num)','(t1.q_int_null,num)','(t1.q_float_null,num)','(t1.q_double_null,num)', + '(t2.q_bigint,num)','(t2.q_smallint,num)','(t2.q_tinyint,num)','(t2.q_int,num)','(t2.q_float,num)','(t2.q_double,num)', + '(t2.q_bigint_null,num)','(t2.q_smallint_null,num)','(t2.q_tinyint_null,num)','(t2.q_int_null,num)','(t2.q_float_null,num)','(t2.q_double_null,num)'] + fun_column_join_1 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1) + math_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num)) + fun_column_join_2 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1) + math_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num)) + + tdSql.query("select 1-1 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts , floor(asct1) from ( select " + sql += "%s as asct1, " % math_fun_1 + sql += "%s as asct2, " % math_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif 
(mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select floor(asct1) from ( select " + sql += "%s as asct1 " % math_fun_1 + # sql += "%s as asct2, " % math_fun_2 + # sql += "%s, " % random.choice(self.s_s_select) + # sql += "%s, " % random.choice(self.q_select) + sql += " from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + #sql += "%s " % random.choice(self.order_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + #tdSql.checkRows(100) + self.cur1.execute(sql) + + tdSql.query("select 1-2 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts , abs(asct1) from ( select " + sql += "%s as asct1, " % math_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s )" % random.choice(self.order_where) + sql += "%s " % random.choice(self.unionall_or_union) + sql += "select ts , asct2 from ( select " + sql += "%s as asct2, " % math_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + #sql += "%s " % random.choice(having_support) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select abs(asct1) from ( select " + sql += "%s as asct1 " % math_fun_1 + # sql += "%s, " % random.choice(self.s_s_select) + # sql += "%s, " % random.choice(self.q_select) + sql += "from regular_table_1 where " + sql += "%s )" % random.choice(self.q_where) + #sql += "%s )" % random.choice(self.order_where) + sql += "%s " % random.choice(self.unionall_or_union) + sql += "select floor(asct2) from ( select " + sql += "%s as asct2 " % math_fun_2 + # sql += "%s, " % random.choice(self.s_s_select) + # sql += "%s, " % random.choice(self.q_select) + sql += " from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + #sql += "%s " % random.choice(having_support) + #sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15473 tdSql.query(sql) + #TD-15473 self.cur1.execute(sql) + + tdSql.query("select 1-3 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts , min(asct1) from ( select " + sql += "%s as asct1, ts ," % math_fun_1 + sql += "%s as asct2, " % math_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s select " % random.choice(self.unionall_or_union) + sql 
+= "%s as asct2, ts ," % math_fun_2 + sql += "%s as asct1, " % math_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15473 tdSql.query(sql) + #self.cur1.execute(sql) + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select ts , min(asct1) from ( select " + sql += "%s as asct1, ts ," % math_fun_1 + sql += "%s as asct2, " % math_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s as asct2, ts ," % math_fun_2 + sql += "%s as asct1, " % math_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15473 tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 1-4 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts , asct1 from ( select t1.ts as ts," + sql += "%s, " % math_fun_join_1 + sql += "%s as asct1, " % math_fun_join_2 + sql += "%s, " % math_fun_join_1 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select count(asct1) from ( select " + sql += "%s as asct1 " % math_fun_join_2 + sql += "from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-5 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts ," + sql += "%s, " % math_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % math_fun_2 + sql += " from ( select * from regular_table_1 ) where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (mathlist 
== ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select " + # sql += "%s, " % math_fun_1 + # sql += "%s, " % random.choice(self.q_select) + # sql += "%s, " % random.choice(self.q_select) + sql += "%s " % math_fun_2 + sql += " from ( select * from regular_table_1 ) where " + sql += "%s " % random.choice(self.q_where) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15973 tdSql.query(sql) + #TD-15973 self.cur1.execute(sql) + + tdSql.query("select 1-6 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts , max(asct1) from ( select t1.ts as ts," + sql += "%s, " % math_fun_join_1 + sql += "%s as asct1, " % math_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % math_fun_join_1 + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s )" % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select max(asct1) from ( select " + #sql += "%s, " % math_fun_join_1 + sql += "%s as asct1 " % math_fun_join_2 + # sql += "t1.%s, " % random.choice(self.q_select) + # sql += "t2.%s, " % random.choice(self.q_select) + # sql += "%s, " % math_fun_join_1 + sql += "from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s )" % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-7 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts , abs(asct1) from ( select " + sql += "%s as asct1, ts ," % math_fun_1 + sql += "%s as asct2, " % math_fun_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # tdSql.checkRows(300) + # self.cur1.execute(sql)# TD-16039 + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE']) or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select abs(asct1) from ( select " + sql += "%s as asct1 " % math_fun_1 + sql += "from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + + tdSql.query("select 1-8 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == 
['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts,floor(asct1) " + sql += "from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s as asct1, ts ," % math_fun_1 + sql += "%s as asct2, " % math_fun_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #tdSql.query(sql) + # tdSql.checkRows(300) + # self.cur1.execute(sql)# TD-16039 + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select floor(asct1) " + sql += "from ( select " + sql += "%s as asct1 " % math_fun_1 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + + tdSql.query("select 1-9 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts , max(asct1) from ( select t1.ts as ts," + sql += "%s, " % math_fun_join_1 + sql += "%s as asct1, " % math_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "and %s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select max(asct1) from ( select " + sql += "%s as asct1 " % math_fun_join_2 + # sql += "t1.%s, " % random.choice(self.q_select) + # sql += "t1.%s, " % random.choice(self.q_select) + # sql += "t2.%s, " % random.choice(self.q_select) + # sql += "t2.%s, " % random.choice(self.q_select) + sql += "from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "and %s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql)# TD-16039 + # self.cur1.execute(sql) + + self.restartDnodes() + tdSql.query("select 1-10 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts , min(asct1) from ( select " + sql += "%s as asct1, ts ," % math_fun_1 + sql += "%s as asct2, " % math_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % 
random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ") %s " % random.choice(self.unionall_or_union) + sql += "select ts , max(asct2) from ( select " + sql += "%s as asct1, ts ," % math_fun_1 + sql += "%s as asct2, " % math_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select min(asct1) from ( select " + sql += "%s as asct1 " % math_fun_1 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += ") %s " % random.choice(self.unionall_or_union) + sql += "select max(asct2) from ( select " + sql += "%s as asct2 " % math_fun_2 + sql += "from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + + #3 inter union not support + tdSql.query("select 1-11 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts , min(asct1), max(asct2) from ( select " + sql += "%s as asct1, ts ," % math_fun_1 + sql += "%s as asct2, " % math_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + #sql += "%s " % random.choice(limit1_where) + sql += " %s " % random.choice(self.unionall_or_union) + sql += " select " + sql += "%s as asct1, ts ," % math_fun_1 + sql += "%s as asct2, " % math_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15837 tdSql.query(sql) + # self.cur1.execute(sql) + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select min(asct1) from ( select " + sql += "%s as asct1 " % math_fun_1 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += " %s " % random.choice(self.unionall_or_union) + sql += " select " + sql += "%s as asct2 " % math_fun_2 + sql += " from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15837 tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 1-12 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or 
(mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts , max(asct1) from ( select t1.ts as ts," + sql += "%s, " % math_fun_join_1 + sql += "%s as asct1, " % math_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select max(asct1) from ( select " + sql += "%s as asct1 " % math_fun_join_2 + sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + + tdSql.query("select 1-13 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts ," + sql += "%s, " % math_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % math_fun_2 + sql += "%s " % random.choice(self.t_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # tdSql.checkRows(300) + # self.cur1.execute(sql) # TD-16039 + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select " + sql += "%s " % math_fun_2 + sql += "%s " % random.choice(self.t_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD15973 tdSql.query(sql) + #TD15973 self.cur1.execute(sql) + + tdSql.query("select 1-14 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select avg(asct1),count(asct2) from ( select " + sql += "%s as asct1, " % math_fun_1 + sql += "%s as asct2" % math_fun_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += " ) ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == 
['CSUM'])\ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select avg(asct1) from ( select " + sql += "%s as asct1 " % math_fun_1 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += " ) ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + + tdSql.query("select 1-15 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts , max(asct1) from ( select t1.ts as ts," + sql += "%s, " % math_fun_join_1 + sql += "%s as asct1, " % math_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s " % random.choice(self.q_select) + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE']) or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select max(asct1) from ( select " + sql += "%s as asct1 " % math_fun_join_2 + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + + #taos -f sql + startTime_taos_f = time.time() + print("taos -f %s sql start!" %mathlist) + taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename) + _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8") + print("taos -f %s sql over!" 
%mathlist)
+ endTime_taos_f = time.time()
+ print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f))
+
+ print("=========%s====over=============" %mathlist)
+
+
+ def str_nest(self,strlist):
+
+ print("==========%s===start=============" %strlist)
+ os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
+
+ self.dropandcreateDB_random("%s" %self.db_nest, 1)
+
+ if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['LENGTH','CHAR_LENGTH']) \
+ or (strlist == ['']):
+ str_functions = strlist
+ fun_fix_column = ['(q_nchar)','(q_binary)','(q_nchar_null)','(q_binary_null)']
+ fun_column_1 = random.sample(str_functions,1)+random.sample(fun_fix_column,1)
+ str_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+ fun_column_2 = random.sample(str_functions,1)+random.sample(fun_fix_column,1)
+ str_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ fun_fix_column_j = ['(t1.q_nchar)','(t1.q_binary)','(t1.q_nchar_null)','(t1.q_binary_null)',
+ '(t2.q_nchar)','(t2.q_binary)','(t2.q_nchar_null)','(t2.q_binary_null)']
+ fun_column_join_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1)
+ str_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+ fun_column_join_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1)
+ str_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ fun_fix_column_s = ['(q_nchar)','(q_binary)','(q_nchar_null)','(q_binary_null)','(loc)','(tbname)']
+ fun_column_s_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_s,1)
+ str_fun_s_1 = str(fun_column_s_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+ fun_column_s_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_s,1)
+ str_fun_s_2 = str(fun_column_s_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ fun_fix_column_s_j = ['(t1.q_nchar)','(t1.q_binary)','(t1.q_nchar_null)','(t1.q_binary_null)','(t1.loc)','(t1.tbname)',
+ '(t2.q_nchar)','(t2.q_binary)','(t2.q_nchar_null)','(t2.q_binary_null)','(t2.loc)','(t2.tbname)']
+ fun_column_join_s_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_s_j,1)
+ str_fun_join_s_1 = str(fun_column_join_s_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+ fun_column_join_s_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_s_j,1)
+ str_fun_join_s_2 = str(fun_column_join_s_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ elif (strlist == ['SUBSTR']) :
+ str_functions = strlist
+ pos = random.randint(1, 20)
+ sub_len = random.randint(1, 10)
+ fun_fix_column = ['(q_nchar,pos)','(q_binary,pos)','(q_nchar_null,pos)','(q_binary_null,pos)',
+ '(q_nchar,pos,sub_len)','(q_binary,pos,sub_len)','(q_nchar_null,pos,sub_len)','(q_binary_null,pos,sub_len)',]
+ fun_column_1 = random.sample(str_functions,1)+random.sample(fun_fix_column,1)
+ str_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+ fun_column_2 = random.sample(str_functions,1)+random.sample(fun_fix_column,1)
+ str_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+
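+ # Worked example (illustrative only): with pos=3 and sub_len=5, sampling 'SUBSTR'
+ # and '(q_nchar,pos,sub_len)' gives ['SUBSTR', '(q_nchar,pos,sub_len)'], which the
+ # replace() chain above turns into the expression "SUBSTR(q_nchar,3,5)".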
elif (strlist == ['SUBSTR']) : + str_functions = strlist + pos = random.randint(1, 20) + sub_len = random.randint(1, 10) + fun_fix_column = ['(q_nchar,pos)','(q_binary,pos)','(q_nchar_null,pos)','(q_binary_null,pos)', + '(q_nchar,pos,sub_len)','(q_binary,pos,sub_len)','(q_nchar_null,pos,sub_len)','(q_binary_null,pos,sub_len)',] + fun_column_1 = random.sample(str_functions,1)+random.sample(fun_fix_column,1) + str_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len)) + fun_column_2 = random.sample(str_functions,1)+random.sample(fun_fix_column,1) + str_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len)) + + fun_fix_column_j = ['(t1.q_nchar,pos)','(t1.q_binary,pos)','(t1.q_nchar_null,pos)','(t1.q_binary_null,pos)', + '(t1.q_nchar,pos,sub_len)','(t1.q_binary,pos,sub_len)','(t1.q_nchar_null,pos,sub_len)','(t1.q_binary_null,pos,sub_len)', + '(t2.q_nchar,pos)','(t2.q_binary,pos)','(t2.q_nchar_null,pos)','(t2.q_binary_null,pos)', + '(t2.q_nchar,pos,sub_len)','(t2.q_binary,pos,sub_len)','(t2.q_nchar_null,pos,sub_len)','(t2.q_binary_null,pos,sub_len)'] + fun_column_join_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1) + str_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len)) + fun_column_join_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1) + str_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len)) + + fun_fix_column_s = ['(q_nchar,pos)','(q_binary,pos)','(q_nchar_null,pos)','(q_binary_null,pos)','(loc,pos)', + '(q_nchar,pos,sub_len)','(q_binary,pos,sub_len)','(q_nchar_null,pos,sub_len)','(q_binary_null,pos,sub_len)','(loc,pos,sub_len)',] + fun_column_s_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_s,1) + str_fun_s_1 = str(fun_column_s_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len)) + fun_column_s_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_s,1) + str_fun_s_2 = str(fun_column_s_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len)) + + fun_fix_column_s_j = ['(t1.q_nchar,pos)','(t1.q_binary,pos)','(t1.q_nchar_null,pos)','(t1.q_binary_null,pos)','(t1.loc,pos)', + '(t1.q_nchar,pos,sub_len)','(t1.q_binary,pos,sub_len)','(t1.q_nchar_null,pos,sub_len)','(t1.q_binary_null,pos,sub_len)','(t1.loc,pos,sub_len)', + '(t2.q_nchar,pos)','(t2.q_binary,pos)','(t2.q_nchar_null,pos)','(t2.q_binary_null,pos)','(t2.loc,pos)', + '(t2.q_nchar,pos,sub_len)','(t2.q_binary,pos,sub_len)','(t2.q_nchar_null,pos,sub_len)','(t2.q_binary_null,pos,sub_len)','(t2.loc,pos,sub_len)'] + fun_column_join_s_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_s_j,1) + str_fun_join_s_1 = str(fun_column_join_s_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len)) + fun_column_join_s_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_s_j,1) + str_fun_join_s_2 = str(fun_column_join_s_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len)) + + elif (strlist == ['CONCAT']) : + str_functions = strlist + i = random.randint(2,8) + fun_fix_column = ['q_nchar','q_nchar1','q_nchar2','q_nchar3','q_nchar4','q_nchar5','q_nchar6','q_nchar7','q_nchar8','q_nchar_null', + 'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null'] + + column1 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+column1+')' + str_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","") + + column2 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","") + fun_column_2 = str(random.sample(str_functions,1))+'('+column2+')' + str_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","") + + fun_fix_column_j =
['(t1.q_nchar)','(t1.q_nchar1)','(t1.q_nchar2)','(t1.q_nchar3)','(t1.q_nchar4)','(t1.q_nchar5)','(t1.q_nchar6)','(t1.q_nchar7)','(t1.q_nchar8)','(t1.q_nchar_null)', + '(t2.q_nchar)','(t2.q_nchar1)','(t2.q_nchar2)','(t2.q_nchar3)','(t2.q_nchar4)','(t2.q_nchar5)','(t2.q_nchar6)','(t2.q_nchar7)','(t2.q_nchar8)','(t2.q_nchar_null)', + '(t1.q_binary)','(t1.q_binary1)','(t1.q_binary2)','(t1.q_binary3)','(t1.q_binary4)','(t1.q_binary5)','(t1.q_binary6)','(t1.q_binary7)','(t1.q_binary8)','(t1.q_binary_null)', + '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)'] + + column_j1 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+column_j1+')' + str_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","") + + column_j2 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+column_j2+')' + str_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") + + fun_fix_column_s = ['q_nchar','q_nchar1','q_nchar2','q_nchar3','q_nchar4','q_nchar5','q_nchar6','q_nchar7','q_nchar8','loc','q_nchar_null', + 'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null'] + + column_s1 = str(random.sample(fun_fix_column_s,i)).replace("[","").replace("]","").replace("'","") + fun_column_s_1 = str(random.sample(str_functions,1))+'('+column_s1+')' + str_fun_s_1 = str(fun_column_s_1).replace("[","").replace("]","").replace("'","") + + column_s2 = str(random.sample(fun_fix_column_s,i)).replace("[","").replace("]","").replace("'","") + fun_column_s_2 = str(random.sample(str_functions,1))+'('+column_s2+')' + str_fun_s_2 = str(fun_column_s_2).replace("[","").replace("]","").replace("'","") + + fun_fix_column_s_j = ['(t1.q_nchar)','(t1.q_nchar1)','(t1.q_nchar2)','(t1.q_nchar3)','(t1.q_nchar4)','(t1.q_nchar5)','(t1.q_nchar6)','(t1.q_nchar7)','(t1.q_nchar8)','(t1.q_nchar_null)','(t1.loc)', + '(t2.q_nchar)','(t2.q_nchar1)','(t2.q_nchar2)','(t2.q_nchar3)','(t2.q_nchar4)','(t2.q_nchar5)','(t2.q_nchar6)','(t2.q_nchar7)','(t2.q_nchar8)','(t2.q_nchar_null)','(t2.loc)', + '(t1.q_binary)','(t1.q_binary1)','(t1.q_binary2)','(t1.q_binary3)','(t1.q_binary4)','(t1.q_binary5)','(t1.q_binary6)','(t1.q_binary7)','(t1.q_binary8)','(t1.q_binary_null)', + '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)'] + + column_j_s1 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","") + fun_column_join_s_1 = str(random.sample(str_functions,1))+'('+column_j_s1+')' + str_fun_join_s_1 = str(fun_column_join_s_1).replace("[","").replace("]","").replace("'","") + + column_j_s2 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","") + fun_column_join_s_2 = str(random.sample(str_functions,1))+'('+column_j_s2+')' + str_fun_join_s_2 = str(fun_column_join_s_2).replace("[","").replace("]","").replace("'","") + +
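The CONCAT_WS branch that follows draws a multi-element separator the same way; note that only the brackets are stripped from the sampled separator list, so the single quotes and comma spacing of the stringified Python list survive into the double-quoted SQL literal. A standalone sketch of the first argument it can produce, with trimmed illustrative pools:

```python
import random

separators = ['', ' ', 'abc', '123', '!', '@']          # trimmed stand-in pool
columns = ['q_nchar', 'q_nchar1', 'q_binary', 'q_binary1']

i = random.randint(2, 4)
# Only "[" and "]" are removed, so quotes and ", " remain in the literal.
sep = str(random.sample(separators, i)).replace("[", "").replace("]", "")
cols = str(random.sample(columns, i)).replace("[", "").replace("]", "").replace("'", "")
print('CONCAT_WS("' + sep + '",' + cols + ')')
# e.g. CONCAT_WS("'abc', '!'",q_binary, q_nchar1)
```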
elif (strlist == ['CONCAT_WS']): + str_functions = strlist + i = random.randint(2,8) + fun_fix_column = ['q_nchar','q_nchar1','q_nchar2','q_nchar3','q_nchar4','q_nchar5','q_nchar6','q_nchar7','q_nchar8','q_nchar_null', + 'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null'] + + separators = ['',' ','abc','123','!','@','#','$','%','^','&','*','(',')','-','_','+','=','{', + '[','}',']','|',';',':',',','.','<','>','?','/','~','`','taos','涛思'] + separator = str(random.sample(separators,i)).replace("[","").replace("]","") + + column1 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column1+')' + str_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","") + + column2 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","") + fun_column_2 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column2+')' + str_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","") + + fun_fix_column_j = ['(t1.q_nchar)','(t1.q_nchar1)','(t1.q_nchar2)','(t1.q_nchar3)','(t1.q_nchar4)','(t1.q_nchar5)','(t1.q_nchar6)','(t1.q_nchar7)','(t1.q_nchar8)','(t1.q_nchar_null)', + '(t2.q_nchar)','(t2.q_nchar1)','(t2.q_nchar2)','(t2.q_nchar3)','(t2.q_nchar4)','(t2.q_nchar5)','(t2.q_nchar6)','(t2.q_nchar7)','(t2.q_nchar8)','(t2.q_nchar_null)', + '(t1.q_binary)','(t1.q_binary1)','(t1.q_binary2)','(t1.q_binary3)','(t1.q_binary4)','(t1.q_binary5)','(t1.q_binary6)','(t1.q_binary7)','(t1.q_binary8)','(t1.q_binary_null)', + '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)'] + + column_j1 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_j1+')' + str_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","") + + column_j2 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_j2+')' + str_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") + + fun_fix_column_s = ['q_nchar','q_nchar1','q_nchar2','q_nchar3','q_nchar4','q_nchar5','q_nchar6','q_nchar7','q_nchar8','loc','q_nchar_null', + 'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null'] + + column_s1 = str(random.sample(fun_fix_column_s,i)).replace("[","").replace("]","").replace("'","") + fun_column_s_1 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_s1+')' + str_fun_s_1 = str(fun_column_s_1).replace("[","").replace("]","").replace("'","") + + column_s2 = str(random.sample(fun_fix_column_s,i)).replace("[","").replace("]","").replace("'","") + fun_column_s_2 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_s2+')' + str_fun_s_2 = str(fun_column_s_2).replace("[","").replace("]","").replace("'","") + + fun_fix_column_s_j = ['(t1.q_nchar)','(t1.q_nchar1)','(t1.q_nchar2)','(t1.q_nchar3)','(t1.q_nchar4)','(t1.q_nchar5)','(t1.q_nchar6)','(t1.q_nchar7)','(t1.q_nchar8)','(t1.q_nchar_null)','(t1.loc)', + '(t2.q_nchar)','(t2.q_nchar1)','(t2.q_nchar2)','(t2.q_nchar3)','(t2.q_nchar4)','(t2.q_nchar5)','(t2.q_nchar6)','(t2.q_nchar7)','(t2.q_nchar8)','(t2.q_nchar_null)','(t2.loc)', +
'(t1.q_binary)','(t1.q_binary1)','(t1.q_binary2)','(t1.q_binary3)','(t1.q_binary4)','(t1.q_binary5)','(t1.q_binary6)','(t1.q_binary7)','(t1.q_binary8)','(t1.q_binary_null)', + '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)'] + + column_j_s1 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","") + fun_column_join_s_1 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_j_s1+')' + str_fun_join_s_1 = str(fun_column_join_s_1).replace("[","").replace("]","").replace("'","") + + column_j_s2 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","") + fun_column_join_s_2 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_j_s2+')' + str_fun_join_s_2 = str(fun_column_join_s_2).replace("[","").replace("]","").replace("'","") + + + tdSql.query("select 1-1 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']) : + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select " + sql += "%s as asct1, " % str_fun_1 + sql += "%s as asct2, " % str_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select " + sql += "%s as asct1, " % str_fun_1 + sql += "%s as asct2, " % str_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-2 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']) : + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select " + sql += "%s as asct1, " % str_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s )" % random.choice(self.order_where) + sql += "%s " % random.choice(self.unionall_or_union) + sql += "select ts , asct2 from ( select " + sql += "%s as asct2, " % str_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + #sql += "%s " % random.choice(having_support) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select " + 
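Every query shape in these loops is assembled the same way, as the statements that follow show: start from a fixed SELECT skeleton, then append one randomly chosen clause from each pool (WHERE, ORDER BY, LIMIT, ...). A condensed sketch of the pattern with illustrative stand-in pools (the real self.q_where-style lists are defined elsewhere in this file and are much larger):

```python
import random

# Illustrative stand-ins for the harness's clause pools (self.q_where, ...).
Q_WHERE  = ["where q_int is not null ", "where q_bigint >= -2147483647 "]
ORDER_BY = ["order by ts ", "order by ts desc "]

sql = "select sum(asct1), min(asct1) from ( select "
sql += "LENGTH(q_nchar) as asct1, "
sql += "ts from regular_table_1 "
sql += "%s " % random.choice(Q_WHERE)    # one clause drawn per pool
sql += "%s " % random.choice(ORDER_BY)
sql += ");"
print(sql)
```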
sql += "%s as asct1, " % str_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s )" % random.choice(self.order_where) + sql += "%s " % random.choice(self.unionall_or_union) + sql += "select ts , asct2 from ( select " + sql += "%s as asct2, " % str_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + #sql += "%s " % random.choice(having_support) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + + tdSql.query("select 1-3 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select " + sql += "%s as asct1, ts ," % str_fun_1 + sql += "%s as asct2, " % str_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s as asct2, ts ," % str_fun_2 + sql += "%s as asct1, " % str_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15473 tdSql.query(sql) + #self.cur1.execute(sql) + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select " + sql += "%s as asct1, ts ," % str_fun_1 + sql += "%s as asct2, " % str_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s as asct2, ts ," % str_fun_2 + sql += "%s as asct1, " % str_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15473 tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 1-4 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % str_fun_join_1 + sql += "%s as asct1, " % str_fun_join_2 + sql += "%s, " % str_fun_join_1 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += "%s " % 
random.choice(self.order_u_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % str_fun_join_1 + sql += "%s as asct1, " % str_fun_join_2 + sql += "%s, " % str_fun_join_1 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-5 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts ," + sql += "%s, " % str_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % str_fun_2 + sql += " from ( select * from regular_table_1 ) where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select ts ," + sql += "%s, " % str_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % str_fun_2 + sql += " from ( select * from regular_table_1 ) where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + + tdSql.query("select 1-6 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % str_fun_join_1 + sql += "%s as asct1, " % str_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % str_fun_join_1 + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s )" % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % str_fun_join_1 + sql += "%s as asct1, " % str_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % str_fun_join_1 + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s )" % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) 
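The 1-4 and 1-6 loops around this point exercise one join template: the inner query scans both regular tables pinned on t1.ts = t2.ts, extra predicates are drawn at random, and tdSql.checkRows(100) asserts the expected row count where the query is enabled. A hedged sketch of one SQL string this template can emit, with abbreviated stand-in predicate pools:

```python
import random

# Abbreviated stand-ins for self.q_u_where / self.q_u_or_where.
Q_U_WHERE    = ["t1.q_int = t2.q_int", "t1.q_double >= t2.q_double"]
Q_U_OR_WHERE = ["(t1.q_smallint > 0 or t2.q_smallint < 0)"]

sql = ("select ts , LTRIM(asct1), RTRIM(asct2) from ( select t1.ts as ts,"
       "LOWER(t1.q_nchar) as asct1, UPPER(t2.q_binary) as asct2, "
       "t2.ts from regular_table_1 t1 , regular_table_2 t2 "
       "where t1.ts = t2.ts and %s and %s );"
       % (random.choice(Q_U_WHERE), random.choice(Q_U_OR_WHERE)))
print(sql)
```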
+ tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-7 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select " + sql += "%s as asct1, ts ," % str_fun_s_1 + sql += "%s as asct2, " % str_fun_s_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # tdSql.checkRows(300) + # self.cur1.execute(sql)# TD-16039 + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select " + sql += "%s as asct1, ts ," % str_fun_s_1 + sql += "%s as asct2, " % str_fun_s_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + + tdSql.query("select 1-8 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts, LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) " + sql += "from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s as asct1, ts ," % str_fun_s_1 + sql += "%s as asct2, " % str_fun_s_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # tdSql.checkRows(300) + # self.cur1.execute(sql)# TD-16039 + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) " + sql += "from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s as asct1, ts ," % str_fun_s_1 + sql += "%s as asct2, " % str_fun_s_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + + tdSql.query("select 1-9 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % str_fun_join_s_1 + sql += "%s as asct1, " % str_fun_join_s_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % 
random.choice(self.t_join_where) + sql += "and %s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % str_fun_join_s_1 + sql += "%s as asct1, " % str_fun_join_s_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "and %s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + + self.restartDnodes() + tdSql.query("select 1-10 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select " + sql += "%s as asct1, ts ," % str_fun_s_1 + sql += "%s as asct2, " % str_fun_s_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ") %s " % random.choice(self.unionall_or_union) + sql += "select ts , max(asct2) from ( select " + sql += "%s as asct1, ts ," % str_fun_1 + sql += "%s as asct2, " % str_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select " + sql += "%s as asct1, ts ," % str_fun_s_1 + sql += "%s as asct2, " % str_fun_s_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ") %s " % random.choice(self.unionall_or_union) + sql += "select ts , max(asct2) from ( select " + sql += "%s as asct1, ts ," % str_fun_1 + sql += "%s as asct2, " % str_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + + #3 inter union not support + tdSql.query("select 1-11 as str_nest from stable_1 limit 
1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select " + sql += "%s as asct1, ts ," % str_fun_s_1 + sql += "%s as asct2, " % str_fun_s_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + #sql += "%s " % random.choice(limit1_where) + sql += " %s " % random.choice(self.unionall_or_union) + sql += " select " + sql += "%s as asct1, ts ," % str_fun_1 + sql += "%s as asct2, " % str_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15837 tdSql.query(sql) + # self.cur1.execute(sql) + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select " + sql += "%s as asct1, ts ," % str_fun_s_1 + sql += "%s as asct2, " % str_fun_s_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + #sql += "%s " % random.choice(limit1_where) + sql += " %s " % random.choice(self.unionall_or_union) + sql += " select " + sql += "%s as asct1, ts ," % str_fun_1 + sql += "%s as asct2, " % str_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15837 tdSql.query(sql) + # self.cur1.execute(sql) + + tdSql.query("select 1-12 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % str_fun_join_s_1 + sql += "%s as asct1, " % str_fun_join_s_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % str_fun_join_s_1 + sql += "%s as asct1, " % str_fun_join_s_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % 
random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + + tdSql.query("select 1-13 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts ," + sql += "%s, " % str_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % str_fun_2 + sql += "%s " % random.choice(self.t_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # tdSql.checkRows(300) + # self.cur1.execute(sql) # TD-16039 + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select ts ," + sql += "%s, " % str_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % str_fun_2 + sql += "%s " % random.choice(self.t_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # tdSql.checkRows(300) + # self.cur1.execute(sql)# TD-16039 + + tdSql.query("select 1-14 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select " + sql += "%s as asct1, " % str_fun_s_1 + sql += "%s as asct2" % str_fun_s_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += " ) ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select " + sql += "%s as asct1, " % str_fun_s_1 + sql += "%s as asct2" % str_fun_s_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += " ) ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + + tdSql.query("select 1-15 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % str_fun_join_s_1 + sql += "%s as asct1, " % str_fun_join_s_2 + sql += "t1.%s, " % 
random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s " % random.choice(self.q_select) + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15955 tdSql.query(sql) + #TD-15955 self.cur1.execute(sql) + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % str_fun_join_s_1 + sql += "%s as asct1, " % str_fun_join_s_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s " % random.choice(self.q_select) + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15955 tdSql.query(sql) + #TD-15955 self.cur1.execute(sql) + + #taos -f sql + startTime_taos_f = time.time() + print("taos -f %s sql start!" %strlist) + taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename) + _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8") + print("taos -f %s sql over!" 
%strlist) + endTime_taos_f = time.time() + print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f)) + + print("=========%s====over=============" %strlist) + + def time_nest(self,timelist): + + print("==========%s===start=============" %timelist) + os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) + + self.dropandcreateDB_random("%s" %self.db_nest, 1) + + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMEZONE']): + time_functions = timelist + fun_fix_column = ['()'] + fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","") + fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + fun_fix_column_j = ['()'] + fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","") + fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + elif (timelist == ['TIMETRUNCATE']): + time_functions = timelist + + t = time.time() + t_to_s = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t)) + fun_fix_column = ['q_ts','ts','_c0','_C0','_rowts','1600000000000','1600000000000000','1600000000000000000', + '%d' %t, '%d000' %t, '%d000000' %t,'t_to_s'] + + timeunits = ['1u' , '1a' ,'1s', '1m' ,'1h', '1d'] + timeunit = str(random.sample(timeunits,1)).replace("[","").replace("]","").replace("'","") + + column_1 = ['(%s,timeutil)'%(random.sample(fun_fix_column,1))] + fun_column_1 = random.sample(time_functions,1)+random.sample(column_1,1) + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("\"","").replace("t_to_s","'t_to_s'") + time_fun_1 = str(time_fun_1).replace("timeutil","%s" %timeunit).replace("t_to_s","%s" %t_to_s) + + column_2 = ['(%s,timeutil)'%(random.sample(fun_fix_column,1))] + fun_column_2 = random.sample(time_functions,1)+random.sample(column_2,1) + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("\"","").replace("t_to_s","'t_to_s'") + time_fun_2 = str(time_fun_2).replace("timeutil","%s" %timeunit).replace("t_to_s","%s" %t_to_s) + + + fun_fix_column_j = ['(t1.q_ts)','(t1.ts)', '(t2.q_ts)','(t2.ts)','(1600000000000)','(1600000000000000)','(1600000000000000000)', + '(%d)' %t, '(%d000)' %t, '(%d000000)' %t,'t_to_s'] + + column_j1 = ['(%s,timeutil)'%(random.sample(fun_fix_column_j,1))] + fun_column_join_1 = random.sample(time_functions,1)+random.sample(column_j1,1) + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("\"","").replace("t_to_s","'t_to_s'") + time_fun_join_1 = str(time_fun_join_1).replace("timeutil","%s" %timeunit).replace("t_to_s","%s" %t_to_s) + + column_j2 = ['(%s,timeutil)'%(random.sample(fun_fix_column_j,1))] + fun_column_join_2 = random.sample(time_functions,1)+random.sample(column_j2,1) + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("\"","").replace("t_to_s","'t_to_s'") + time_fun_join_2 = str(time_fun_join_2).replace("timeutil","%s" %timeunit).replace("t_to_s","%s" %t_to_s) + + elif (timelist == 
['TO_ISO8601']): + time_functions = timelist + + t = time.time() + fun_fix_column = ['(now())','(ts)','(q_ts)','(_rowts)','(_c0)','(_C0)', + '(1600000000000)','(1600000000000000)','(1600000000000000000)', + '(%d)' %t, '(%d000)' %t, '(%d000000)' %t] + + fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","") + + fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + fun_fix_column_j = ['(t1.q_ts)','(t1.ts)', '(t2.q_ts)','(t2.ts)','(1600000000000)','(1600000000000000)','(1600000000000000000)','(now())', + '(%d)' %t, '(%d000)' %t, '(%d000000)' %t] + + fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","") + + fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + elif (timelist == ['TO_UNIXTIMESTAMP']): + time_functions = timelist + + t = time.time() + t_to_s = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t)) + fun_fix_column = ['(q_nchar)','(q_nchar1)','(q_nchar2)','(q_nchar3)','(q_nchar4)','(q_nchar_null)','(q_binary)','(q_binary5)','(q_binary6)','(q_binary7)','(q_binary8)','(q_binary_null)','(t_to_s)'] + + fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") + time_fun_1 = str(time_fun_1).replace("t_to_s","%s" %t_to_s) + + fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") + time_fun_2 = str(time_fun_2).replace("t_to_s","%s" %t_to_s) + + fun_fix_column_j = ['(t1.q_nchar)','(t1.q_binary)', '(t2.q_nchar)','(t2.q_binary)','(t_to_s)'] + + fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") + time_fun_join_1 = str(time_fun_join_1).replace("t_to_s","%s" %t_to_s) + + fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") + time_fun_join_2 = str(time_fun_join_2).replace("t_to_s","%s" %t_to_s) + + elif (timelist == ['TIMEDIFF']): + time_functions = timelist + + t = time.time() + t_to_s = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t)) + fun_fix_column = ['(q_nchar)','(q_nchar1)','(q_nchar2)','(q_nchar3)','(q_nchar4)','(q_nchar_null)','(q_binary)','(q_binary5)','(q_binary6)','(q_binary7)','(q_binary8)','(q_binary_null)','(t_to_s)'] + + fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") + time_fun_1 = str(time_fun_1).replace("t_to_s","%s" %t_to_s) + + fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_2 = 
str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") + time_fun_2 = str(time_fun_2).replace("t_to_s","%s" %t_to_s) + + fun_fix_column_j = ['(t1.q_nchar)','(t1.q_binary)', '(t2.q_nchar)','(t2.q_binary)','(t_to_s)'] + + fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") + time_fun_join_1 = str(time_fun_join_1).replace("t_to_s","%s" %t_to_s) + + fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") + time_fun_join_2 = str(time_fun_join_2).replace("t_to_s","%s" %t_to_s) + + elif (timelist == ['ELAPSED']): + time_functions = timelist + + fun_fix_column = ['(ts)','(q_ts)','(_c0)','(_C0)','(_rowts)','(ts,time_unit)','(_c0,time_unit)','(_C0,time_unit)','(_rowts,time_unit)'] + + time_units = ['nums','numm','numh','numd','numa'] + time_unit = str(random.sample(time_units,1)).replace("[","").replace("]","").replace("'","") + time_num1 = random.randint(0, 1000) + time_unit1 = time_unit.replace("num","%d" %time_num1) + time_num2 = random.randint(0, 1000) + time_unit2 = time_unit.replace("num","%d" %time_num2) + + fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("time_unit","%s" %time_unit1) + + fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("time_unit","%s" %time_unit2) + + + fun_fix_column_j = ['(t1.ts)','(t1.q_ts)', '(t2.ts)','(t2.q_ts)','(t1.ts,time_unit)','(t1.q_ts,time_unit)','(t2.ts,time_unit)','(t2.q_ts,time_unit)'] + + fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("time_unit","%s" %time_unit1) + + fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("time_unit","%s" %time_unit2) + + + elif (timelist == ['CAST']) : + str_functions = timelist + #下面的4个是全的,这个只是1个 + i = random.randint(1,4) + if i ==1: + print('===========cast_1===========') + fun_fix_column = ['q_bool','q_bool_null','q_bigint','q_bigint_null','q_smallint','q_smallint_null', + 'q_tinyint','q_tinyint_null','q_int','q_int_null','q_float','q_float_null','q_double','q_double_null'] + type_names = ['BIGINT','BINARY(100)','TIMESTAMP','NCHAR(100)','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","") + + fun_fix_column_j = 
['t1.q_bool','t1.q_bool_null','t1.q_bigint','t1.q_bigint_null','t1.q_smallint','t1.q_smallint_null', + 't1.q_tinyint','t1.q_tinyint_null','t1.q_int','t1.q_int_null','t1.q_float','t1.q_float_null','t1.q_double','t1.q_double_null', + 't2.q_bool','t2.q_bool_null','t2.q_bigint','t2.q_bigint_null','t2.q_smallint','t2.q_smallint_null', + 't2.q_tinyint','t2.q_tinyint_null','t2.q_int','t2.q_int_null','t2.q_float','t2.q_float_null','t2.q_double','t2.q_double_null'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") + + elif i==2: + print('===========cast_2===========') + fun_fix_column = ['q_binary','q_binary_null','q_binary1','q_binary2','q_binary3','q_binary4'] + type_names = ['BIGINT','BINARY(100)','NCHAR(100)','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","") + + fun_fix_column_j = ['t1.q_binary','t1.q_binary_null','t1.q_binary1','t1.q_binary2','t1.q_binary3','t1.q_smallint_null','t1.q_binary4', + 't2.q_binary','t2.q_binary_null','t2.q_bigint','t2.q_binary1','t2.q_binary2','t2.q_binary3','t2.q_binary4'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") + + elif i==3: + print('===========cast_3===========') + fun_fix_column = ['q_nchar','q_nchar_null','q_nchar5','q_nchar6','q_nchar7','q_nchar8'] + type_names = ['BIGINT','NCHAR(100)','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = 
str(fun_column_2).replace("[","").replace("]","").replace("'","") + + fun_fix_column_j = ['t1.q_nchar','t1.q_nchar_null','t1.q_nchar5','t1.q_nchar6','t1.q_nchar7','t1.q_nchar8', + 't2.q_nchar','t2.q_nchar_null','t2.q_nchar5','t2.q_nchar6','t2.q_nchar7','t2.q_nchar8'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") + + elif i==4: + print('===========cast_4===========') + fun_fix_column = ['q_ts','q_ts_null','_C0','_c0','ts','_rowts'] + type_names = ['BIGINT','TIMESTAMP','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","") + + fun_fix_column_j = ['t1.q_ts','t1.q_ts_null','t1.ts','t2.q_ts','t2.q_ts_null','t2.ts'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") + + elif (timelist == ['CAST_1']) : + str_functions = timelist + + print('===========cast_1===========') + fun_fix_column = ['q_bool','q_bool_null','q_bigint','q_bigint_null','q_smallint','q_smallint_null', + 'q_tinyint','q_tinyint_null','q_int','q_int_null','q_float','q_float_null','q_double','q_double_null'] + type_names = ['BIGINT','BINARY(100)','TIMESTAMP','NCHAR(100)','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_1","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_1","") + + fun_fix_column_j = ['t1.q_bool','t1.q_bool_null','t1.q_bigint','t1.q_bigint_null','t1.q_smallint','t1.q_smallint_null', + 
't1.q_tinyint','t1.q_tinyint_null','t1.q_int','t1.q_int_null','t1.q_float','t1.q_float_null','t1.q_double','t1.q_double_null', + 't2.q_bool','t2.q_bool_null','t2.q_bigint','t2.q_bigint_null','t2.q_smallint','t2.q_smallint_null', + 't2.q_tinyint','t2.q_tinyint_null','t2.q_int','t2.q_int_null','t2.q_float','t2.q_float_null','t2.q_double','t2.q_double_null'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_1","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_1","") + + elif (timelist == ['CAST_2']) : + str_functions = timelist + print('===========cast_2===========') + fun_fix_column = ['q_binary','q_binary_null','q_binary1','q_binary2','q_binary3','q_binary4'] + type_names = ['BIGINT','BINARY(100)','NCHAR(100)','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_2","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_2","") + + fun_fix_column_j = ['t1.q_binary','t1.q_binary_null','t1.q_binary1','t1.q_binary2','t1.q_binary3','t1.q_smallint_null','t1.q_binary4', + 't2.q_binary','t2.q_binary_null','t2.q_bigint','t2.q_binary1','t2.q_binary2','t2.q_binary3','t2.q_binary4'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_2","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_2","") + + elif (timelist == ['CAST_3']) : + str_functions = timelist + print('===========cast_3===========') + fun_fix_column = ['q_nchar','q_nchar_null','q_nchar5','q_nchar6','q_nchar7','q_nchar8'] + type_names = ['BIGINT','NCHAR(100)','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_3","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = 
str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_3","") + + fun_fix_column_j = ['t1.q_nchar','t1.q_nchar_null','t1.q_nchar5','t1.q_nchar6','t1.q_nchar7','t1.q_nchar8', + 't2.q_nchar','t2.q_nchar_null','t2.q_nchar5','t2.q_nchar6','t2.q_nchar7','t2.q_nchar8'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_3","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_3","") + + elif (timelist == ['CAST_4']) : + str_functions = timelist + print('===========cast_4===========') + fun_fix_column = ['q_ts','q_ts_null','_C0','_c0','ts','_rowts'] + type_names = ['BIGINT','TIMESTAMP','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_4","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_4","") + + fun_fix_column_j = ['t1.q_ts','t1.q_ts_null','t1.ts','t2.q_ts','t2.q_ts_null','t2.ts'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_4","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_4","") + + tdSql.query("select 1-1 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) \ + or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or 
(timelist == ['CAST_4']): + sql = "select ts , asct1,now(),today(),timezone() from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select max(asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += "from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-2 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now),now(),today(),timezone() from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s )" % random.choice(self.order_where) + sql += "%s " % random.choice(self.unionall_or_union) + sql += "select ts , timediff(asct2,now),now(),today(),timezone() from ( select " + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + #sql += "%s " % random.choice(having_support) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s )" % random.choice(self.order_where) + sql += "%s " % random.choice(self.unionall_or_union) + sql += "select ts , asct2,now(),today(),timezone() from ( select " + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select min(asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1 " % time_fun_1 + sql += " from regular_table_1 where " + sql += "%s )" % random.choice(self.q_where) + sql += "%s " % random.choice(self.unionall_or_union) + sql += "select avg(asct2),now(),today(),timezone() from ( select " + sql += "%s 
as asct2 " % time_fun_2 + sql += " from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-3 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s as asct2, ts ," % time_fun_2 + sql += "%s as asct1, " % time_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15473 tdSql.query(sql) + #self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s as asct2, ts ," % time_fun_2 + sql += "%s as asct1, " % time_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15473 tdSql.query(sql) + #self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select abs(asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1," % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += "from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s as asct2," % time_fun_2 + sql += "%s as asct1 " % time_fun_1 + sql += "from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-4 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "%s, " % time_fun_join_1 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % 
random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1) from ( select t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "%s, " % time_fun_join_1 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select floor(asct1) from ( select " + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "%s " % time_fun_join_1 + sql += " from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-5 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['ELAPSED']) : + sql = "select now(),today(),timezone(), " + sql += "%s, " % time_fun_1 + sql += "%s " % time_fun_2 + sql += " from ( select * from regular_table_1 ) where " + sql += "%s " % random.choice(self.q_where) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + else: + sql = "select ts ,now(),today(),timezone(), " + sql += "%s, " % time_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % time_fun_2 + sql += " from ( select * from regular_table_1 ) where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + + tdSql.query("select 1-6 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % time_fun_join_1 + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s )" % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1) from ( select 
t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % time_fun_join_1 + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s )" % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select (asct1)*111 from ( select " + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "%s " % time_fun_join_1 + sql += " from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s )" % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-7 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) # TD-16039 + # tdSql.checkRows(300) + # self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) # TD-16039 + # tdSql.checkRows(300) + # self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select (asct1)/asct2 ,now(),today(),timezone() from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += "from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) # triggers core dumps TD-16095 and TD-16042 at the same time + # self.cur1.execute(sql)
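+ # Illustration (added comment, not original test logic): with timelist == ['CAST'], the
+ # CAST branch above generates a statement shaped roughly like
+ #   select ts , (asct1),now(),today(),timezone() from ( select
+ #       CAST(q_int AS BIGINT) as asct1, CAST(q_double AS BINARY(100)) as asct2, ... ,
+ #       ts from stable_1 where <cond> <order by> ) ;
+ # i.e. the outer query may only reference ts and the aliases exported by the subquery.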
tdSql.query("select 1-8 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) " + sql += "from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) # TD-16039 + # tdSql.checkRows(300) + # self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1),now(),today(),timezone() " + sql += "from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) # TD-16039 + # tdSql.checkRows(300) + # self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select floor(abs(asct1)),now(),today(),timezone() " + sql += "from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += "from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) # TD-16039 + # self.cur1.execute(sql) + + tdSql.query("select 1-9 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "and %s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) TD-16039 + # self.cur1.execute(sql) TD-16039 + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , asct1 from ( select t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "and %s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + elif (timelist == ['ELAPSED']) : + sql = "select min(asct1*110) from ( select " + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1 " % time_fun_join_2
+ sql += "from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "and %s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + + self.restartDnodes() + tdSql.query("select 1-10 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ") %s " % random.choice(self.unionall_or_union) + sql += "select ts , max(asct2) from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ") %s " % random.choice(self.unionall_or_union) + sql += "select ts , max(asct2),now(),today(),timezone() from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select abs(asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1 ," % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += ") %s " % random.choice(self.unionall_or_union) + sql += "select max(asct2),now(),today(),timezone() from ( select " + sql += "%s as asct1 ," % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += "from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + + #3 inter union not support + tdSql.query("select 1-11 as time_nest from stable_1 limit 1;") + for i in 
range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now), timediff(now,asct2) from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += " %s " % random.choice(self.unionall_or_union) + sql += " select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql)#TD-15473 + # self.cur1.execute(sql)#TD-15473 + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1,now()),(now(),asct2) from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += " %s " % random.choice(self.unionall_or_union) + sql += " select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql)#TD-15473 + # self.cur1.execute(sql)#TD-15473 + elif (timelist == ['ELAPSED']) : + sql = "select asct1+asct2,now(),today(),timezone() from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += " %s " % random.choice(self.unionall_or_union) + sql += " select " + sql += "%s as asct1 ," % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += " from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql)#TD-15473 + self.cur1.execute(sql)#TD-15473 + + tdSql.query("select 1-12 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) 
+ sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , asct1,now() from ( select t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + elif (timelist == ['ELAPSED']) : + sql = "select min(floor(asct1)),now() from ( select " + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1 " % time_fun_join_2 + sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + + tdSql.query("select 1-13 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(%s,now)," % time_fun_2 + sql += "%s as asct1, " % time_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % time_fun_2 + sql += "%s " % random.choice(self.t_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # tdSql.checkRows(300) + # self.cur1.execute(sql) # TD-16039 + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts ,now(),today(),timezone(), " + sql += "%s as asct1, " % time_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % time_fun_2 + sql += "%s " % random.choice(self.t_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # tdSql.checkRows(300) + # self.cur1.execute(sql) # TD-16039 + elif (timelist == ['ELAPSED']) : + sql = "select now(),today(),timezone(), " + sql += "%s as asct1, " % time_fun_1 + sql += "%s " % time_fun_2 + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + + tdSql.query("select 1-14 as time_nest 
from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now),timediff(now,asct2) from ( select ts ts ," + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2" % time_fun_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += " ) ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1),now(),(now()),asct2 from ( select ts ts ," + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2" % time_fun_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += " ) ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + elif (timelist == ['ELAPSED']) : + sql = "select ts , (asct1)*asct2,now(),(now()) from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2" % time_fun_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += " ) ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + + tdSql.query("select 1-15 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now),timediff(now,asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s " % random.choice(self.q_select) + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , asct1,(now()),(now()),asct2 ,now(),today(),timezone() from ( select t1.ts as ts," + sql += "%s as asct2, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + 
sql += "t2.%s " % random.choice(self.q_select) + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + elif (timelist == ['ELAPSED']) : + sql = "select asct1,(now()),(now()),asct2 ,now(),today(),timezone() from ( select " + sql += "%s as asct2, " % time_fun_join_1 + sql += "%s as asct1 " % time_fun_join_2 + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) # TD-16039 + + #taos -f sql + startTime_taos_f = time.time() + print("taos -f %s sql start!" %timelist) + taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename) + _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8") + print("taos -f %s sql over!" %timelist) + endTime_taos_f = time.time() + print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f)) + + print("=========%s====over=============" %timelist)
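+ # A minimal sketch (comment only, with assumed example values) of the expression-builder
+ # pattern shared by the section above and base_nest below: random.sample returns a
+ # one-element list, so str() yields e.g. "['ABS', '(q_int)']", and the replace chain
+ # strips the list syntax to leave a bare SQL expression:
+ #   expr = random.sample(['ABS'],1) + random.sample(['(q_int)'],1)   # ['ABS', '(q_int)']
+ #   expr = str(expr).replace("[","").replace("]","").replace("'","").replace(", ","")
+ #   # expr is now: ABS(q_int)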
def base_nest(self,baselist): + + print("==========%s===start=============" %baselist) + os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) + + self.dropandcreateDB_random("%s" %self.db_nest, 1) + + if (baselist == ['A']) or (baselist == ['S']) or (baselist == ['F']) \ + or (baselist == ['C']): + base_functions = baselist + fun_fix_column = ['(q_bigint)','(q_smallint)','(q_tinyint)','(q_int)','(q_float)','(q_double)','(q_bigint_null)','(q_smallint_null)','(q_tinyint_null)','(q_int_null)','(q_float_null)','(q_double_null)'] + fun_column_1 = random.sample(base_functions,1)+random.sample(fun_fix_column,1) + base_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","") + fun_column_2 = random.sample(base_functions,1)+random.sample(fun_fix_column,1) + base_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + fun_fix_column_j = ['(t1.q_bigint)','(t1.q_smallint)','(t1.q_tinyint)','(t1.q_int)','(t1.q_float)','(t1.q_double)','(t1.q_bigint_null)','(t1.q_smallint_null)','(t1.q_tinyint_null)','(t1.q_int_null)','(t1.q_float_null)','(t1.q_double_null)', + '(t2.q_bigint)','(t2.q_smallint)','(t2.q_tinyint)','(t2.q_int)','(t2.q_float)','(t2.q_double)','(t2.q_bigint_null)','(t2.q_smallint_null)','(t2.q_tinyint_null)','(t2.q_int_null)','(t2.q_float_null)','(t2.q_double_null)'] + fun_column_join_1 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1) + base_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","") + fun_column_join_2 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1) + base_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + elif (baselist == ['P']) or (baselist == ['M']) or (baselist == ['S']) or (baselist == ['T']): + base_functions = baselist + num = random.randint(0, 1000) + fun_fix_column = ['(q_bigint,num)','(q_smallint,num)','(q_tinyint,num)','(q_int,num)','(q_float,num)','(q_double,num)', + '(q_bigint_null,num)','(q_smallint_null,num)','(q_tinyint_null,num)','(q_int_null,num)','(q_float_null,num)','(q_double_null,num)'] + fun_column_1 = random.sample(base_functions,1)+random.sample(fun_fix_column,1) + base_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num)) + fun_column_2 = random.sample(base_functions,1)+random.sample(fun_fix_column,1) + base_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num)) + + fun_fix_column_j = ['(t1.q_bigint,num)','(t1.q_smallint,num)','(t1.q_tinyint,num)','(t1.q_int,num)','(t1.q_float,num)','(t1.q_double,num)', + '(t1.q_bigint_null,num)','(t1.q_smallint_null,num)','(t1.q_tinyint_null,num)','(t1.q_int_null,num)','(t1.q_float_null,num)','(t1.q_double_null,num)', + '(t2.q_bigint,num)','(t2.q_smallint,num)','(t2.q_tinyint,num)','(t2.q_int,num)','(t2.q_float,num)','(t2.q_double,num)', + '(t2.q_bigint_null,num)','(t2.q_smallint_null,num)','(t2.q_tinyint_null,num)','(t2.q_int_null,num)','(t2.q_float_null,num)','(t2.q_double_null,num)'] + fun_column_join_1 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1) + base_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num)) + fun_column_join_2 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1) + base_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num)) + + tdSql.query("select 1-1 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , floor(asct1) from ( select " + sql += "%s as asct1, " % base_fun_1 + sql += "%s as asct2, " % base_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + + tdSql.query("select 1-2 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , abs(asct1) from ( select " + sql += "%s as asct1, " % base_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s )" % random.choice(self.order_where) + sql += "%s " % random.choice(self.unionall_or_union) + sql += "select ts , asct2 from ( select " + sql += "%s as asct2, " % base_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + #sql += "%s " % random.choice(having_support) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + + tdSql.query("select 1-3 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , min(asct1) from ( select " + sql += "%s as asct1, ts ," % base_fun_1 + sql += "%s as asct2, " % base_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s,
" % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s as asct2, ts ," % base_fun_2 + sql += "%s as asct1, " % base_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15473 tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 1-4 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , asct1 from ( select t1.ts as ts," + sql += "%s, " % base_fun_join_1 + sql += "%s as asct1, " % base_fun_join_2 + sql += "%s, " % base_fun_join_1 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + + tdSql.query("select 1-5 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts ," + sql += "%s, " % base_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % base_fun_2 + sql += " from ( select * from regular_table_1 ) where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + + tdSql.query("select 1-6 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , max(asct1) from ( select t1.ts as ts," + sql += "%s, " % base_fun_join_1 + sql += "%s as asct1, " % base_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % base_fun_join_1 + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s )" % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-7 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , abs(asct1) from ( select " + sql += "%s as asct1, ts ," % base_fun_1 + sql += "%s as asct2, " % base_fun_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(300) + self.cur1.execute(sql) + + tdSql.query("select 1-8 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts,floor(asct1) " + sql += "from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s as asct1, ts ," % base_fun_1 + sql += "%s as asct2, " % base_fun_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % 
random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(300) + self.cur1.execute(sql) + + tdSql.query("select 1-9 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , max(asct1) from ( select t1.ts as ts," + sql += "%s, " % base_fun_join_1 + sql += "%s as asct1, " % base_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "and %s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + self.restartDnodes() + tdSql.query("select 1-10 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , min(asct1) from ( select " + sql += "%s as asct1, ts ," % base_fun_1 + sql += "%s as asct2, " % base_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ") %s " % random.choice(self.unionall_or_union) + sql += "select ts , max(asct2) from ( select " + sql += "%s as asct1, ts ," % base_fun_1 + sql += "%s as asct2, " % base_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + + #3 inter union not support + tdSql.query("select 1-11 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , min(asct1), max(asct2) from ( select " + sql += "%s as asct1, ts ," % base_fun_1 + sql += "%s as asct2, " % base_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + #sql += "%s " % random.choice(limit1_where) + sql += " %s " % random.choice(self.unionall_or_union) + sql += " select " + sql += "%s as asct1, ts ," % base_fun_1 + sql += "%s as asct2, " % base_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15837 tdSql.query(sql) + # self.cur1.execute(sql) + + tdSql.query("select 1-12 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , max(asct1) from ( select t1.ts as ts," + sql += "%s, " % base_fun_join_1 + sql += "%s as asct1, " % base_fun_join_2 + sql += "t1.%s, " % 
random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-13 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts ," + sql += "%s, " % base_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % base_fun_2 + sql += "%s " % random.choice(self.t_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(300) + self.cur1.execute(sql) + + tdSql.query("select 1-14 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select avg(asct1),count(asct2) from ( select " + sql += "%s as asct1, " % base_fun_1 + sql += "%s as asct2" % base_fun_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += " ) ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-15 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , max(asct1) from ( select t1.ts as ts," + sql += "%s, " % base_fun_join_1 + sql += "%s as asct1, " % base_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s " % random.choice(self.q_select) + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + #taos -f sql + startTime_taos_f = time.time() + print("taos -f %s sql start!" %baselist) + taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename) + _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8") + print("taos -f %s sql over!" 
%baselist) + endTime_taos_f = time.time() + print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f)) + + print("=========%s====over=============" %baselist) + + def function_before_26(self): + + print('=====================2.6 old function start ===========') + os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) + + self.dropandcreateDB_random("%s" %self.db_nest, 1) + + #1 select * from (select column from regular_table where <\>\in\and\or order by) + tdSql.query("select 1-1 from stable_1;") + for i in range(self.fornum): + #sql = "select ts , * from ( select " === select * is not supported yet, use the following line instead + sql = "select ts from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + + #1 outer union not supported + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 1-2 from stable_1;") + for i in range(self.fornum): + #sql = "select ts , * from ( select " + sql = "select ts from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ") union " + #sql += "select ts , * from ( select " + sql += "select ts from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 1-2 from stable_1;") + for i in range(self.fornum): + #sql = "select ts , * from ( select " + sql = "select ts from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ") union all " + #sql += "select ts , * from ( select " + sql += "select ts from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(200) + self.cur1.execute(sql) + + #1 inter union not supported + tdSql.query("select 1-3 from stable_1;") + for i in range(self.fornum): + #sql = "select ts , * from ( select " + sql = "select ts from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "" + sql += " union select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15607 tdSql.query(sql) + #tdSql.checkRows(200) + #self.cur1.execute(sql)
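+ # Note (added for clarity): with 100 rows in regular_table_1, "union" of the two identical
+ # projections deduplicates back to 100 rows while "union all" keeps 200, which is what the
+ # 1-2 loops above check; the 1-3 loops around this point instead move the union INSIDE a
+ # single subquery, and those stay disabled by TD-15607.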
tdSql.query("select 1-3 from stable_1;") + for i in range(self.fornum): + #sql = "select ts , * from ( select " + sql = "select ts from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += " union all select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15607 tdSql.query(sql) + # tdSql.checkRows(300) + #self.cur1.execute(sql) + + #join:select * from (select column from regular_table1,regular_table2 where t1.ts=t2.ts and <\>\in\and\or order by) + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 1-4 from stable_1;") + for i in range(self.fornum): + #sql = "select ts , * from ( select t1.ts ," + sql = "select * from ( select t1.ts ," + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + + + #2 select column from (select * from regular_table ) where <\>\in\and\or order by + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 2-1 from stable_1;") + for i in range(self.fornum): + sql = "select ts ," + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s " % random.choice(self.q_select) + sql += " from ( select * from regular_table_1 ) where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + + #join: select column from (select column from regular_table1,regular_table2 ) where t1.ts=t2.ts and <\>\in\and\or order by + #cross join not supported yet + tdSql.query("select 2-2 from stable_1;") + for i in range(self.fornum): + sql = "select ts , * from ( select t1.ts ," + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 ) where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "%s " % random.choice(self.order_u_where) + #sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.error(sql) + + #3 select * from (select column\tag from stable where <\>\in\and\or order by ) + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 3-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(300) + self.cur1.execute(sql)
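+ # Note (added for clarity): the stable_1 checks above expect 300 rows (the fixture
+ # apparently loads 100 rows per child table across 3 child tables); the loop below repeats
+ # 3-1 but projects a single scalar column out of the subquery instead of select *.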
tdSql.query(sql) + tdSql.checkRows(300) + self.cur1.execute(sql) + tdSql.query("select 3-1 from stable_1;") + for i in range(self.fornum): + sql = "select ts, " + sql += "%s " % random.choice(self.s_r_select) + sql += "from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(300) + self.cur1.execute(sql) + + # select ts,* from (select column\tag from stable1,stable2 where t1.ts = t2.ts and <\>\in\and\or order by ) + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 3-2 from stable_1;") + for i in range(self.fornum): + sql = "select ts , * from ( select t1.ts , " + sql += "t1.%s, " % random.choice(self.s_s_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.s_s_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "%s " % random.choice(self.order_u_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # TD-15609 tdSql.query(sql) + # tdSql.checkRows(100) + #self.cur1.execute(sql) + + #3 outer union not supported + self.restartDnodes() + tdSql.query("select 3-3 from stable_1;") + for i in range(self.fornum): + #sql = "select ts , * from ( select " + sql = "select ts from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ") union " + sql += "select ts from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(300) + self.cur1.execute(sql) + for i in range(self.fornum): + #sql = "select ts , * from ( select " + sql = "select ts from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ") union all " + sql += "select ts from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(600) + self.cur1.execute(sql) + + #3 inner union not supported + tdSql.query("select 3-4 from stable_1;") + for i in range(self.fornum): + sql = "select ts , * from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += " %s " % random.choice(self.unionall_or_union) + sql += " select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from 
stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15837 tdSql.query(sql) + # self.cur1.execute(sql) + + #join:select * from (select column from stable1,stable2 where t1.ts=t2.ts and <\>\in\and\or order by) + tdSql.query("select 3-5 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select t1.ts ," + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_where) + sql += "%s " % random.choice(self.order_u_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + # TD-15609 tdSql.query(sql) + # tdSql.checkRows(100) + #self.cur1.execute(sql) + + tdSql.query("select 3-6 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select t1.ts ," + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + # TD-15609 same as above tdSql.query(sql) + # tdSql.checkRows(100) + #self.cur1.execute(sql) + + #4 select column from (select * from stable where <\>\in\and\or order by ) + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 4-1 from stable_1;") + for i in range(self.fornum): + sql = "select ts , " + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % random.choice(self.t_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(300) + self.cur1.execute(sql) + + #5 select distinct column\tag from (select * from stable where <\>\in\and\or order by limit offset ) + tdSql.query("select 5-1 from stable_1;") + for i in range(self.fornum): + sql = "select " + sql += "%s " % random.choice(self.dqt_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15500 tdSql.query(sql) + #self.cur1.execute(sql) + + #5-1 select distinct column\tag from (select calc from stable where <\>\in\and\or order by limit offset ) + tdSql.query("select 5-2 from stable_1;") + for i in range(self.fornum): + sql = "select distinct c5_1 " + sql += " from ( select " + sql += "%s " % random.choice(self.calc_select_in_ts) + sql += " as c5_1 from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + #sql += "%s " % random.choice(order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + #tdSql.checkRows(1) some functions are not merged yet and return no result, ignore for now + self.cur1.execute(sql) + + #6-error select * from (select distinct(tag) from stable where <\>\in\and\or order by limit ) + tdSql.query("select 6-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.dt_select) + sql += " from 
stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.error(sql) + tdSql.query("select 6-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.dt_select) + sql += " from stable_1 where " + sql += "%s ) ;" % random.choice(self.qt_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + #7-error select * from (select distinct(tag) from stable where <\>\in\and\or order by limit ) + tdSql.query("select 7-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.dq_select) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice([self.limit_where[0] , self.limit_where[1]] ) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.error(sql) #distinct and order by cannot be used together + tdSql.query("select 7-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.dq_select) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + #sql += "%s " % random.choice(order_desc_where) + sql += "%s " % random.choice([self.limit_where[0] , self.limit_where[1]] ) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(1) + self.cur1.execute(sql) + + #calc_select,TWA/Diff/Derivative/Irate are not allowed to apply to super table directly + #8 select * from (select ts,calc from regular_table where <\>\in\and\or order by ) + + # dcDB = self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 8-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select ts ," + sql += "%s " % random.choice(self.calc_select_support_ts) + sql += "from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) # aggregate functions can no longer be used together with ts, DB error: Not a single-group group function + self.cur1.execute(sql) + tdSql.query("select 8-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.calc_select_not_support_ts) + sql += "from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15651 tdSql.query(sql) # aggregate functions can no longer be used together with ts, DB error: Not a single-group group function + #self.cur1.execute(sql) + + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.calc_select_in_ts) + sql += "from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + #sql += "%s " % random.choice(order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 8-2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select t1.ts, " + sql += "%s " % random.choice(self.calc_select_in_support_ts_j) + sql += "from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "%s " % random.choice(self.order_u_where) + 
sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql)# 聚合函数不在可以和ts一起使用了 DB error: Not a single-group group function + self.cur1.execute(sql) + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.calc_select_in_not_support_ts_j) + sql += "from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15651 tdSql.query(sql) + ##top返回结果有问题 tdSql.checkRows(1) + #self.cur1.execute(sql) + + #9 select * from (select ts,calc form stable where <\>\in\and\or order by ) + # self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 9-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.calc_select_not_support_ts) + sql += "from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15651 tdSql.query(sql) + # self.cur1.execute(sql) + tdSql.query("select 9-2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select ts ," + sql += "%s " % random.choice(self.calc_select_support_ts) + sql += "from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 9-3 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.calc_select_in_not_support_ts_j) + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15651 tdSql.query(sql) + #self.cur1.execute(sql) + tdSql.query("select 9-4 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select t1.ts," + sql += "%s " % random.choice(self.calc_select_in_support_ts_j) + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + #10 select calc from (select * form regualr_table where <\>\in\and\or order by ) + tdSql.query("select 10-1 from stable_1;") + for i in range(self.fornum): + sql = "select " + sql += "%s " % random.choice(self.calc_select_in_ts) + sql += "as calc10_1 from ( select * from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(1) + self.cur1.execute(sql) + + #10-1 select calc from (select * form regualr_table where <\>\in\and\or order by ) + # rsDn = self.restartDnodes() + # self.dropandcreateDB_random("%s" %db, 1) + # rsDn = self.restartDnodes() + tdSql.query("select 10-2 from stable_1;") + for i in 
range(self.fornum): + sql = "select " + sql += "%s " % random.choice(self.calc_select_all) + sql += "as calc10_2 from ( select * from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15651 tdSql.query(sql) + # tdSql.checkRows(1) + #self.cur1.execute(sql) + + #10-2 select calc from (select * from regular_tables where <\>\in\and\or order by ) + tdSql.query("select 10-3 from stable_1;") + for i in range(self.fornum): + sql = "select " + sql += "%s as calc10_3 " % random.choice(self.calc_select_all) + sql += " from ( select * from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += " and %s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + sql += "%s ;" % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15651 tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 10-4 from stable_1;") + for i in range(self.fornum): + sql = "select " + sql += "%s as calc10_4 " % random.choice(self.calc_select_all) + sql += " from ( select * from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_or_where) + sql += " and %s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15651 tdSql.query(sql) + # tdSql.checkRows(1) + #self.cur1.execute(sql) + + #11 select calc from (select * from stable where <\>\in\and\or order by limit ) + tdSql.query("select 11-1 from stable_1;") + for i in range(self.fornum): + sql = "select " + sql += "%s " % random.choice(self.calc_select_in_ts) + sql += "as calc11_1 from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(1) + self.cur1.execute(sql) + + #11-1 select calc from (select * from stable where <\>\in\and\or order by limit ) + tdSql.query("select 11-2 from stable_1;") + for i in range(self.fornum): + sql = "select " + sql += "%s " % random.choice(self.calc_select_all) + sql += "as calc11_1 from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15651 tdSql.query(sql) + #self.cur1.execute(sql) + #the result is hard to verify tdSql.checkRows(1) + + #11-2 select calc from (select * from stables where <\>\in\and\or order by limit ) + tdSql.query("select 11-3 from stable_1;") + for i in range(self.fornum): + sql = "select " + sql += "%s " % random.choice(self.calc_select_all) + sql += "as calc11_1 from ( select * from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15651 
tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 11-4 from stable_1;") + for i in range(self.fornum): + sql = "select " + sql += "%s " % random.choice(self.calc_select_all) + sql += "as calc11_1 from ( select * from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15651 tdSql.query(sql) + #self.cur1.execute(sql) + + #12 select calc-diff from (select * from regular_table where <\>\in\and\or order by limit ) + ##self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 12-1 from stable_1;") + for i in range(self.fornum): + sql = "select " + sql += "%s " % random.choice(self.calc_calculate_regular) + sql += " from ( select * from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + ##derivative is not supported yet tdSql.query(sql) + # tdSql.checkRows(1) + #self.cur1.execute(sql) + + tdSql.query("select 12-2 from stable_1;") + for i in range(self.fornum): + sql = "select " + sql += "%s " % random.choice(self.calc_calculate_regular) + sql += " from ( select * from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #derivative is not supported yet tdSql.query(sql) + # tdSql.checkRows(1) + #self.cur1.execute(sql) + + tdSql.query("select 12-2.2 from stable_1;") + for i in range(self.fornum): + sql = "select " + sql += "%s " % random.choice(self.calc_calculate_regular) + sql += " from ( select * from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #derivative is not supported yet tdSql.query(sql) + #self.cur1.execute(sql) + + #12-1 select calc-diff from (select * from stable where <\>\in\and\or order by limit ) + tdSql.query("select 12-3 from stable_1;") + self.restartDnodes() + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.calc_calculate_regular) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.group_where) + sql += ") " + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #derivative is not supported yet tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 12-4 from stable_1;") + #join query does not support group by + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.calc_calculate_regular_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "%s " % random.choice(self.group_where_j) + sql += ") " + #sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % 
random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #tdSql.query(sql) the derivative function is not supported yet; also check whether group by and partition by need to be handled separately + #self.cur1.execute(sql) + + tdSql.query("select 12-5 from stable_1;") + #join query does not support group by + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.calc_calculate_regular_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.group_where_j) + sql += ") " + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #derivative not supported tdSql.query(sql) + #self.cur1.execute(sql) + + + #13 select calc-diff as diffns from (select * from stable where <\>\in\and\or order by limit ) + tdSql.query("select 13-1 from stable_1;") + for i in range(self.fornum): + sql = "select " + sql += "%s " % random.choice(self.calc_calculate_regular) + sql += " as calc13_1 from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.orders_desc_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #derivative not supported tdSql.query(sql) + #self.cur1.execute(sql) + + #14 select * from (select calc_aggregate_alls as agg from stable where <\>\in\and\or group by order by slimit soffset ) + # TD-5955 select * from ( select count (q_double) from stable_1 where t_bool = true or t_bool = false group by loc order by ts asc slimit 1 ) ; + tdSql.query("select 14-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all) + sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all) + sql += "%s " % random.choice(self.calc_aggregate_all) + sql += " as calc14_3 from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.group_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15678 tdSql.query(sql) + # tdSql.checkRows(1) + #self.cur1.execute(sql) + + # error: group by in the outer query + tdSql.query("select 14-2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all) + sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all) + sql += "%s " % random.choice(self.calc_aggregate_all) + sql += " as calc14_3 from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.group_where) + sql += "%s " % random.choice(self.having_support) + sql += "%s " % random.choice(self.orders_desc_where) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s " % random.choice(self.group_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15678 tdSql.query(sql) + # tdSql.checkRows(1) + #self.cur1.execute(sql) + + #14-2 select * from (select calc_aggregate_all_js as agg from stables where <\>\in\and\or group by order by slimit soffset ) + tdSql.query("select 14-3 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all_j) + sql 
+= "%s as calc14_2, " % random.choice(self.calc_aggregate_all_j) + sql += "%s " % random.choice(self.calc_aggregate_all_j) + sql += " as calc14_3 from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 14-4 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all_j) + sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all_j) + sql += "%s " % random.choice(self.calc_aggregate_all_j) + sql += " as calc14_3 from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + #15 select * from (select calc_aggregate_regulars as agg from regular_table where <\>\in\and\or order by slimit soffset ) + tdSql.query("select 15-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_regular) + sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_regular) + sql += "%s " % random.choice(self.calc_aggregate_regular) + sql += " as calc15_3 from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.group_where_regular) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #tdSql.query(sql) #Invalid function name: twa' + # tdSql.checkRows(1) + #self.cur1.execute(sql) + + tdSql.query("select 15-2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_regular_j) + sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_regular_j) + sql += "%s " % random.choice(self.calc_aggregate_regular_j) + sql += " as calc15_3 from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "%s " % random.choice(self.group_where_regular_j) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #tdSql.query(sql) #Invalid function name: twa' + #self.cur1.execute(sql) + + tdSql.query("select 15-2.2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_regular_j) + sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_regular_j) + sql += "%s " % random.choice(self.calc_aggregate_regular_j) + sql += " as calc15_3 from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.group_where_regular_j) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #tdSql.query(sql) #Invalid function name: twa' + #self.cur1.execute(sql) + + self.restartDnodes() + tdSql.query("select 15-3 from stable_1;") + for i in 
range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname) + sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname) + sql += "%s " % random.choice(self.calc_aggregate_groupbytbname) + sql += " as calc15_3 from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.group_where) + sql += "%s " % random.choice(self.having_support) + sql += "%s " % random.choice(self.order_desc_where) + sql += ") " + sql += "order by calc15_1 " + sql += "%s " % random.choice(self.limit_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #tdSql.query(sql) #Invalid function name: twa', may also need to drop the order by + #self.cur1.execute(sql) + + tdSql.query("select 15-4 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname_j) + sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname_j) + sql += "%s " % random.choice(self.calc_aggregate_groupbytbname_j) + sql += " as calc15_3 from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "%s " % random.choice(self.group_where_j) + sql += "%s " % random.choice(self.having_support_j) + #sql += "%s " % random.choice(orders_desc_where) + sql += ") " + sql += "order by calc15_1 " + sql += "%s " % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #tdSql.query(sql) #'Invalid function name: irate' + #self.cur1.execute(sql) + + tdSql.query("select 15-4.2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname_j) + sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname_j) + sql += "%s " % random.choice(self.calc_aggregate_groupbytbname_j) + sql += " as calc15_3 from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.group_where_j) + sql += "%s " % random.choice(self.having_support_j) + sql += "%s " % random.choice(self.orders_desc_where) + sql += ") " + sql += "order by calc15_1 " + sql += "%s " % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15678 #tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 15-5 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname) + sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname) + sql += "%s " % random.choice(self.calc_aggregate_groupbytbname) + sql += " as calc15_3 from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.group_where) + sql += ") " + sql += "order by calc15_1 " + sql += "%s " % random.choice(self.limit_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #tdSql.query(sql) #'Invalid function name: irate' + #self.cur1.execute(sql) + + #16 select * from (select calc_aggregate_regulars as agg from regular_table where <\>\in\and\or order by limit offset ) + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 16-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc16_0 , " % random.choice(self.calc_calculate_all) + sql += "%s as calc16_1 , " % random.choice(self.calc_aggregate_all) + sql += "%s as calc16_2 " % 
random.choice(self.calc_select_in) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.group_where) + #sql += "%s " % random.choice(having_support) having and partition cannot be used together + sql += ") " + sql += "order by calc16_0 " + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15651 tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 16-2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc16_0 " % random.choice(self.calc_calculate_all_j) + sql += ", %s as calc16_1 " % random.choice(self.calc_aggregate_all_j) + #sql += ", %s as calc16_2 " % random.choice(self.calc_select_in_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += ") " + sql += "order by calc16_0 " + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 16-2.2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc16_0 " % random.choice(self.calc_calculate_all_j) + sql += ", %s as calc16_1 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.qt_u_or_where) + sql += ") " + sql += "order by calc16_0 " + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 16-3 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc16_1 " % random.choice(self.calc_calculate_regular) + sql += " from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "limit 2 ) " + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #tdSql.query(sql)#Invalid function name: derivative' + #self.cur1.execute(sql) + + tdSql.query("select 16-4 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc16_1 " % random.choice(self.calc_calculate_regular_j) + sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "limit 2 ) " + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #tdSql.query(sql)#Invalid function name: derivative' + #self.cur1.execute(sql) + + tdSql.query("select 16-4.2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc16_1 " % random.choice(self.calc_calculate_regular_j) + sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_or_where) + sql += "limit 2 ) " + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #tdSql.query(sql)#Invalid function name: derivative' + #self.cur1.execute(sql) + + tdSql.query("select 16-5 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc16_1 , " % random.choice(self.calc_calculate_all) + sql += "%s as calc16_1 , " % random.choice(self.calc_calculate_regular) + sql += "%s as calc16_2 " % random.choice(self.calc_select_all) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.group_where) + #sql += "%s " % 
random.choice(having_support) + sql += ") " + sql += "order by calc16_1 " + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 16-6 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc16_1 " % random.choice(self.calc_calculate_groupbytbname) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.group_where) + sql += "limit 2 ) " + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #Invalid function name: derivative' tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 16-7 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc16_1 " % random.choice(self.calc_calculate_groupbytbname_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "limit 2 ) " + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #Invalid function name: derivative' tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 16-8 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc16_1 " % random.choice(self.calc_calculate_groupbytbname_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.qt_u_or_where) + sql += "limit 2 ) " + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #Invalid function name: derivative' tdSql.query(sql) + #self.cur1.execute(sql) + + #17 select apercentile from (select calc_aggregate_alls form regualr_table or stable where <\>\in\and\or interval_sliding group by having order by limit offset )interval_sliding + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 17-1 from stable_1;") + for i in range(self.fornum): + #this is having_support , but tag-select cannot mix with last_row,other select can + sql = "select apercentile(cal17_0, %d)/10 ,apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_0 , " % random.choice(self.calc_calculate_all) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(having_support) + #sql += "%s " % random.choice(order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-2 from stable_1;") + for i in range(self.fornum): + #this is having_support , but tag-select cannot mix with last_row,other select can + sql = "select apercentile(cal17_0, %d)/10 ,apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_0 , " % random.choice(self.calc_calculate_all_j) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += 
"%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "%s " % random.choice(self.interval_sliding) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-2.2 from stable_1;") + for i in range(self.fornum): + #this is having_support , but tag-select cannot mix with last_row,other select can + sql = "select apercentile(cal17_0, %d)/10 ,apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_0 , " % random.choice(self.calc_calculate_all_j) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + self.restartDnodes() + tdSql.query("select 17-3 from stable_1;") + for i in range(self.fornum): + #this is having_tagnot_support , because tag-select cannot mix with last_row... + sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(self.having_tagnot_support) + #sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-4 from stable_1;") + for i in range(self.fornum): + #this is having_tagnot_support , because tag-select cannot mix with last_row... + sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-4.2 from stable_1;") + for i in range(self.fornum): + #this is having_tagnot_support , because tag-select cannot mix with last_row... 
+ sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-5 from stable_1;") + for i in range(self.fornum): + #having_not_support + sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice(self.interval_sliding) + # sql += "%s " % random.choice(self.having_not_support) + # sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-6 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-7 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1_1 t1, stable_1_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-7.2 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as 
cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1_1 t1, stable_1_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + self.restartDnodes() + tdSql.query("select 17-8 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all) + sql += " from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-9 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-10 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + #18 select apercentile from (select calc_aggregate_alls form regualr_table or stable where <\>\in\and\or session order by limit )interval_sliding + tdSql.query("select 18-1 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all) + sql += " from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % 
random.choice(self.session_where) + #sql += "%s " % random.choice(self.fill_where) + #sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 18-2 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "%s " % random.choice(self.session_u_where) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 18-2.2 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.session_u_where) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + self.restartDnodes() + tdSql.query("select 18-3 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all) + sql += " from stable_1_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.session_where) + #sql += "%s " % random.choice(self.fill_where) + #sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 18-4 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "%s " % random.choice(self.session_u_where) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % 
random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 18-4.2 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.session_u_where) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 18-5 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.session_where) + #sql += "%s " % random.choice(self.fill_where) + #sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 18-6 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "%s " % random.choice(self.session_u_where) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 18-7 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.session_u_where) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + #19 select apercentile from (select calc_aggregate_alls form regualr_table or stable where <\>\in\and\or session order by limit )interval_sliding + 
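# note: despite the #19 header above, the 19-x loops exercise state_window (not session) in the inner query; a representative statement (aggregates and column names are illustrative only) is: + # select apercentile(cal19_1, 50)/1000 ,apercentile(cal19_2, 50)*10+100 from ( select count(*) as cal19_1 , sum(q_int) as cal19_2 from regular_table_1 where q_bool = true state_window(q_int) limit 100 ) ; + # 19-5 deliberately expects an error, since state_window is not supported for super table queries + 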
#self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 19-1 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all) + sql += " from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.state_window) + #sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 19-2 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "%s " % random.choice(self.state_u_window) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 19-2.2 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.state_u_window) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 19-3 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all) + sql += " from stable_1_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.state_window) + #sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 19-4 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1_1 t1, stable_1_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + #sql += "%s " % 
random.choice(self.state_window) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 19-4.2 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1_1 t1, stable_1_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_or_where) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 19-5 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.state_window) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.error(sql) #'STATE_WINDOW not support for super table query' + + tdSql.query("select 19-6 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + #sql += "%s " % random.choice(self.state_window) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 19-7 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.qt_u_or_where) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + #20 select * from (select calc_select_fills form regualr_table or stable where <\>\in\and\or fill_where group by order by limit offset ) + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 20-1 from stable_1;") + for i in 
range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill) + sql += "%s ," % random.choice(self.calc_select_fill) + sql += "%s " % random.choice(self.calc_select_fill) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.interp_where) + sql += "%s " % random.choice(self.fill_where) + sql += "%s " % random.choice(self.group_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + #interp not supported: tdSql.query(sql) + #self.cur1.execute(sql) + + rsDn = self.restartDnodes() + tdSql.query("select 20-2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill_j) + sql += "%s ," % random.choice(self.calc_select_fill_j) + sql += "%s " % random.choice(self.calc_select_fill_j) + sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s and " % random.choice(self.t_join_where) + sql += "%s " % random.choice(self.interp_where_j) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + #interp not supported: tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 20-2.2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill_j) + sql += "%s ," % random.choice(self.calc_select_fill_j) + sql += "%s " % random.choice(self.calc_select_fill_j) + sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s and " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.interp_where_j) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + #interp not supported: tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 20-3 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill) + sql += "%s ," % random.choice(self.calc_select_fill) + sql += "%s " % random.choice(self.calc_select_fill) + sql += " from stable_1 where " + sql += "%s " % self.interp_where[2] + sql += "%s " % random.choice(self.fill_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + #interp not supported: tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 20-4 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill_j) + sql += "%s ," % random.choice(self.calc_select_fill_j) + sql += "%s " % random.choice(self.calc_select_fill_j) + sql += " from stable_1 t1, table_1 t2 where t1.ts = t2.ts and " + #sql += "%s and " % random.choice(self.t_join_where) + sql += "%s " % self.interp_where_j[random.randint(0,5)] + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + #interp not supported: tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 20-4.2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill_j) + sql += "%s ," % random.choice(self.calc_select_fill_j) + sql += "%s " % random.choice(self.calc_select_fill_j) + sql += " 
from stable_1 t1, stable_1_1 t2 where t1.ts = t2.ts and " + sql += "%s and " % random.choice(self.qt_u_or_where) + sql += "%s " % self.interp_where_j[random.randint(0,5)] + sql += "%s " % random.choice(self.fill_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + ##interp not supported: tdSql.error(sql) + #self.cur1.execute(sql) + + tdSql.query("select 20-5 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill) + sql += "%s ," % random.choice(self.calc_select_fill) + sql += "%s " % random.choice(self.calc_select_fill) + sql += " from regular_table_1 where " + sql += "%s " % self.interp_where[1] + sql += "%s " % random.choice(self.fill_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + ##interp not supported: tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 20-6 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill_j) + sql += "%s ," % random.choice(self.calc_select_fill_j) + sql += "%s " % random.choice(self.calc_select_fill_j) + sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + #sql += "%s " % random.choice(self.interp_where_j) + sql += "%s " % self.interp_where_j[random.randint(0,5)] + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + ##interp not supported: tdSql.query(sql) + #self.cur1.execute(sql) + + #1 select * from (select * from (select * from regular_table where <\>\in\and\or order by limit )) + tdSql.query("select 1-1 from stable_1;") + for i in range(self.fornum): + # sql_start = "select * from ( " + # sql_end = ")" + for_num = random.randint(1, 15); + sql = "select * from (" * for_num + sql += "select * from ( select * from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += ")) " + sql += ")" * for_num + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + sql2 = "select * from ( select * from ( select " + sql2 += "%s, " % random.choice(self.s_r_select) + sql2 += "%s, " % random.choice(self.q_select) + sql2 += "ts from regular_table_1 where " + sql2 += "%s " % random.choice(self.q_where) + sql2 += ")) " + tdLog.info(sql2) + tdLog.info(len(sql2)) + tdSql.query(sql2) + self.cur1.execute(sql2) + + self.data_matrix_equal('%s' %sql ,1,10,1,1,'%s' %sql2 ,1,10,1,1) + self.data_matrix_equal('%s' %sql ,1,10,1,1,'%s' %sql ,1,10,3,3) + self.data_matrix_equal('%s' %sql ,1,10,3,3,'%s' %sql2 ,1,10,3,3) + + for i in range(self.fornum): + for_num = random.randint(1, 15); + sql = "select ts from (" * for_num + sql += "select * from ( select * from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += ")) " + sql += ")" * for_num + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + sql2 = "select * from ( select * from ( select " + sql2 += "%s, " % random.choice(self.s_r_select) + sql2 += "%s, " % random.choice(self.q_select) + 
sql2 += "ts from regular_table_1 where " + sql2 += "%s " % random.choice(self.q_where) + sql2 += ")) " + tdLog.info(sql2) + tdLog.info(len(sql2)) + tdSql.query(sql2) + self.cur1.execute(sql2) + + self.data_matrix_equal('%s' %sql ,1,10,1,1,'%s' %sql2 ,1,10,1,1) + + #2 select * from (select * from (select * form stable where <\>\in\and\or order by limit )) + tdSql.query("select 2-1 from stable_1;") + for i in range(self.fornum): + for_num = random.randint(1, 15); + sql = "select * from (" * for_num + sql += "select * from ( select * from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.qt_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += ")) " + sql += ")" * for_num + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + sql2 = "select * from ( select * from ( select " + sql2 += "%s, " % random.choice(self.s_s_select) + sql2 += "%s, " % random.choice(self.qt_select) + sql2 += "ts from stable_1 where " + sql2 += "%s " % random.choice(self.q_where) + sql2 += ")) " + tdLog.info(sql2) + tdLog.info(len(sql2)) + tdSql.query(sql2) + self.cur1.execute(sql2) + + self.data_matrix_equal('%s' %sql ,1,10,3,3,'%s' %sql2 ,1,10,3,3) + + for i in range(self.fornum): + for_num = random.randint(1, 15); + sql = "select ts from (" * for_num + sql += "select * from ( select * from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.qt_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += ")) " + sql += ")" * for_num + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + sql2 = "select ts from ( select * from ( select " + sql2 += "%s, " % random.choice(self.s_s_select) + sql2 += "%s, " % random.choice(self.qt_select) + sql2 += "ts from stable_1 where " + sql2 += "%s " % random.choice(self.q_where) + sql2 += ")) " + tdLog.info(sql2) + tdLog.info(len(sql2)) + tdSql.query(sql2) + self.cur1.execute(sql2) + + self.data_matrix_equal('%s' %sql ,1,10,1,1,'%s' %sql2 ,1,10,1,1) + + #3 select ts ,calc from (select * form stable where <\>\in\and\or order by limit ) + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 3-1 from stable_1;") + for i in range(self.fornum): + sql = "select " + sql += "%s " % random.choice(self.calc_calculate_regular) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.orders_desc_where) + sql += "%s " % random.choice(self.limit_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #'Invalid function name: derivative' tdSql.query(sql) + #self.cur1.execute(sql) + + #4 select * from (select calc form stable where <\>\in\and\or order by limit ) + tdSql.query("select 4-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.calc_select_in_ts) + sql += "from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + #sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice(self.limit_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + #5 select ts ,tbname from (select * form stable where <\>\in\and\or order by limit ) + tdSql.query("select 5-1 from stable_1;") + for i in range(self.fornum): + sql = "select ts , tbname , " + sql += "%s ," % random.choice(self.calc_calculate_regular) + sql += "%s ," % 
random.choice(self.dqt_select) + sql += "%s " % random.choice(self.qt_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.orders_desc_where) + sql += "%s " % random.choice(self.limit_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.error(sql) + + #special sql + tdSql.query("select 6-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select _block_dist() from stable_1);" + # tdSql.query(sql) + # tdSql.checkRows(1) + sql = "select _block_dist() from (select * from stable_1);" + tdSql.error(sql) + sql = "select * from (select database());" + tdSql.error(sql) + sql = "select * from (select client_version());" + tdSql.error(sql) + sql = "select * from (select client_version() as version);" + tdSql.error(sql) + sql = "select * from (select server_version());" + tdSql.error(sql) + sql = "select * from (select server_version() as version);" + tdSql.error(sql) + sql = "select * from (select server_status());" + tdSql.error(sql) + sql = "select * from (select server_status() as status);" + tdSql.error(sql) + + #taos -f sql + startTime_taos_f = time.time() + print("taos -f sql start!") + taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename) + _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8") + print("taos -f sql over!") + endTime_taos_f = time.time() + print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f)) + + print('=====================2.6 old function end ===========') + + + + def run(self): + tdSql.prepare() + + startTime = time.time() + + # + + + #self.math_nest(['TAIL']) #TD-16009 + # self.math_nest(['HYPERLOGLOG']) #TD-16038 + # self.math_nest(['UNIQUE']) + + + + # # + #self.function_before_26() #TD-16031 + + # self.math_nest(['ABS','SQRT']) #TD-16042 + # self.math_nest(['SIN','COS','TAN','ASIN','ACOS','ATAN']) + # self.math_nest(['POW','LOG']) #TD-16039 + # self.math_nest(['FLOOR','CEIL','ROUND']) + # #self.math_nest(['SAMPLE']) #TD-16017 + # #self.math_nest(['CSUM']) #TD-15936 crash + # self.math_nest(['MAVG']) + + self.str_nest(['LTRIM','RTRIM','LOWER','UPPER']) + self.str_nest(['LENGTH','CHAR_LENGTH']) + self.str_nest(['SUBSTR']) #TD-16042 + self.str_nest(['CONCAT']) #TD-16002 occasionally fails + self.str_nest(['CONCAT_WS']) #TD-16002 occasionally fails + # self.time_nest(['CAST']) #TD-16017 occasionally fails, moved into the time group below + self.time_nest(['CAST_1']) + self.time_nest(['CAST_2']) + self.time_nest(['CAST_3']) + self.time_nest(['CAST_4']) + + + + # self.time_nest(['NOW','TODAY']) # + # self.time_nest(['TIMEZONE']) # + # self.time_nest(['TIMETRUNCATE']) #TD-16039 + # self.time_nest(['TO_ISO8601']) + # self.time_nest(['TO_UNIXTIMESTAMP'])#frequent core dumps + # self.time_nest(['ELAPSED']) + + + endTime = time.time() + print("total time %ds" % (endTime - startTime)) + + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/percentile.py b/tests/system-test/2-query/percentile.py new file mode 100644 index 0000000000000000000000000000000000000000..8df9bcb9ce4df065a151d33116f1331298ee35fd --- /dev/null +++ b/tests/system-test/2-query/percentile.py @@ -0,0 +1,83 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + + +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + intData = [] + floatData = [] + + tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') + for i in range(self.rowNum): + tdSql.execute("insert into test values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + intData.append(i + 1) + floatData.append(i + 0.1) + + # percentile verification + tdSql.error("select percentile(ts ,20) from test") + tdSql.error("select percentile(col7 ,20) from test") + tdSql.error("select percentile(col8 ,20) from test") + tdSql.error("select percentile(col9 ,20) from test") + column_list = [1,2,3,4,11,12,13,14] + percent_list = [0,50,100] + for i in column_list: + for j in percent_list: + tdSql.query(f"select percentile(col{i}, {j}) from test") + tdSql.checkData(0, 0, np.percentile(intData, j)) + + for i in [5,6]: + for j in percent_list: + tdSql.query(f"select percentile(col{i}, {j}) from test") + tdSql.checkData(0, 0, np.percentile(floatData, j)) + + tdSql.execute("create table meters (ts timestamp, voltage int) tags(loc nchar(20))") + tdSql.execute("create table t0 using meters tags('beijing')") + tdSql.execute("create table t1 using meters tags('shanghai')") + for i in range(self.rowNum): + tdSql.execute("insert into t0 values(%d, %d)" % (self.ts + i, i + 1)) + tdSql.execute("insert into t1 values(%d, %d)" % (self.ts + i, i + 1)) + + # tdSql.error("select percentile(voltage, 20) from meters") + + + + tdSql.execute("create table st(ts timestamp, k int)") + tdSql.execute("insert into st values(now, -100)(now+1a,-99)") + tdSql.query("select apercentile(k, 20) from st") + tdSql.checkData(0, 0, -100.00) + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/sample.py b/tests/system-test/2-query/sample.py new file mode 100644 index 0000000000000000000000000000000000000000..94e06347d2923fc60d99768c667b927dde5dfd83 --- /dev/null +++ b/tests/system-test/2-query/sample.py @@ -0,0 +1,863 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from pstats import Stats +import sys +import subprocess +import random +import math +import numpy as np +import inspect +import re +import taos + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def sample_query_form(self, sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + ''' + sample function: + + :param sel: string, must be "select", required parameters; + :param func: string, in this case must be "sample(", otherwise return other function, required parameters; + :param col: string, column name, required parameters; + :param m_comm: string, comma between col and k, required parameters; + :param k: int/float, the number of rows to sample, in [1,1000], required parameters; + :param r_comm: string, must be ")", use with "(" in func, required parameters; + :param alias: string, an alias for the result column, or an extra select expression; + :param fr: string, must be "from", required parameters; + :param table_expr: string or expression, data source (e.g. table/stable name, result set), required parameters; + :param condition: expression; + :return: sample query statement, default: select sample(c1, 1) from t1 + ''' + + return f"{sel} {func} {col} {m_comm} {k} {r_comm} {alias} {fr} {table_expr} {condition}" + + def checksample(self,sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + # print(self.sample_query_form(sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition)) + line = sys._getframe().f_back.f_lineno + + if not all([sel , func , col , m_comm , k , r_comm , fr , table_expr]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + + sql = "select * from t1" + collist = tdSql.getColNameList(sql) + + if not isinstance(col, str): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if len([x for x in col.split(",") if x.strip()]) != 1: + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + col = col.replace(",", "").replace(" ","") + + if any([re.compile('^[a-zA-Z]{1}.*$').match(col) is None , not col.replace(".","").isalnum()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if '.' 
in col: + if any([col.split(".")[0] not in table_expr, col.split(".")[1] not in collist]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + if "." not in col: + if col not in collist: + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + # colname = col if "." not in col else col.split(".")[1] + # col_index = collist.index(colname) + # if any([tdSql.cursor.istype(col_index, "TIMESTAMP"), tdSql.cursor.istype(col_index, "BOOL")]): + # print(f"case in {line}: ", end='') + # return tdSql.error(self.sample_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + # + # if any([tdSql.cursor.istype(col_index, "BINARY") , tdSql.cursor.istype(col_index,"NCHAR")]): + # print(f"case in {line}: ", end='') + # return tdSql.error(self.sample_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + + if any( [func != "sample(" , r_comm != ")" , fr != "from", sel != "select"]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all(["(" not in table_expr, "stb" in table_expr, "group" not in condition.lower()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all(["group" in condition.lower(), "tbname" not in condition.lower()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + alias_list = ["tbname", "_c0", "st", "ts"] + if all([alias, "," not in alias]): + if any([ not alias.isalnum(), re.compile('^[a-zA-Z]{1}.*$').match(col) is None ]): + # actually, column aliases also support "_", but that is forbidden in this case. + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all([alias, "," in alias]): + if all(parm != alias.lower().split(",")[1].strip() for parm in alias_list): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + condition_exception = [ "-", "+", "/", "*", "~", "^", "insert", "distinct", + "count", "avg", "twa", "irate", "sum", "stddev", "leastsquares", + "min", "max", "first", "last", "top", "bottom", "percentile", + "apercentile", "last_row", "interp", "diff", "derivative", + "spread", "ceil", "floor", "round", "interval", "fill", "slimit", "soffset"] + if "union" not in condition.lower(): + if any(parm in condition.lower().strip() for parm in condition_exception): + + print(f"case in {line}: ", 
end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + if not any([isinstance(k, int) , isinstance(k, float)]) : + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + col=col, k=k, alias=alias, table_expr=table_expr, condition=condition + )) + + if not(1 <= k < 1001): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + col=col, k=k, alias=alias, table_expr=table_expr, condition=condition + )) + + k = int(k // 1) + pre_sql = re.sub("sample\([a-z0-9 .,]*\)", f"count({col})", self.sample_query_form( + col=col, table_expr=table_expr, condition=condition + )) + tdSql.query(pre_sql) + if tdSql.queryRows == 0: + tdSql.query(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + print(f"case in {line}: ", end='') + tdSql.checkRows(0) + return + + tdSql.query(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + sample_result = tdSql.queryResult + sample_len = tdSql.queryRows + + if "group" in condition: + tb_condition = condition.split("group by")[1].split(" ")[1] + tdSql.query(f"select distinct {tb_condition} from {table_expr}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + clear_condition = re.sub('order by [0-9a-z]*|slimit [0-9]*|soffset [0-9]*', "", condition) + + pre_row = 0 + for i in range(query_rows): + group_name = query_result[i][0] + if "where" in clear_condition: + pre_condition = re.sub('group by [0-9a-z]*', f"and {tb_condition}='{group_name}' and {col} is not null", clear_condition) + else: + pre_condition = "where " + re.sub('group by [0-9a-z]*',f"{tb_condition}='{group_name}' and {col} is not null", clear_condition) + + tdSql.query(f"select ts, {col} {alias} from {table_expr} {pre_condition}") + # pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + # pre_sample = np.convolve(pre_data, np.ones(k), "valid")/k + pre_sample = tdSql.queryResult + pre_len = tdSql.queryRows + step = pre_len if pre_len < k else k + # tdSql.query(self.sample_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + for i in range(step): + if sample_result[pre_row:pre_row+step][i] not in pre_sample: + tdLog.exit(f"case in {line} is failed: sample data is not in {group_name}") + else: + tdLog.info(f"case in {line} is success: sample data is in {group_name}") + + # for j in range(len(pre_sample)): + # print(f"case in {line}:", end='') + # tdSql.checkData(pre_row+j, 1, pre_sample[j]) + pre_row += step + return + elif "union" in condition: + union_sql_0 = self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + ).split("union all")[0] + + union_sql_1 = self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + ).split("union all")[1] + + tdSql.query(union_sql_0) + # union_sample_0 = tdSql.queryResult + row_union_0 = tdSql.queryRows + + tdSql.query(union_sql_1) + # union_sample_1 = tdSql.queryResult + row_union_1 = tdSql.queryRows + + 
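# The full "union all" statement below is validated by row count only: it must + # return exactly row_union_0 + row_union_1 rows. The commented-out per-row checks + # stay disabled, presumably because sample() picks its rows nondeterministically. + 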
tdSql.query(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + # for i in range(tdSql.queryRows): + # print(f"case in {line}: ", end='') + # if i < row_union_0: + # tdSql.checkData(i, 1, union_sample_0[i][1]) + # else: + # tdSql.checkData(i, 1, union_sample_1[i-row_union_0][1]) + if row_union_0 + row_union_1 != sample_len: + tdLog.exit(f"case in {line} is failed: sample data is not in ") + else: + tdLog.info(f"case in {line} is success: sample data is in ") + return + + else: + if "where" in condition: + condition = re.sub('where', f"where {col} is not null and ", condition) + else: + condition = f"where {col} is not null" + condition + print(f"select ts, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + tdSql.query(f"select ts, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + # offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0 + pre_sample = tdSql.queryResult + # pre_len = tdSql.queryRows + # for i in range(sample_len): + # if sample_result[pre_row:pre_row + step][i] not in pre_sample: + # tdLog.exit(f"case in {line} is failed: sample data is not in {group_name}") + # else: + # tdLog.info(f"case in {line} is success: sample data is in {group_name}") + + pass + + def sample_current_query(self) : + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1~6: numeric col:int/bigint/tinyint/smallint/float/double + self.checksample() + case2 = {"col": "c2"} + self.checksample(**case2) + case3 = {"col": "c5"} + self.checksample(**case3) + case4 = {"col": "c7"} + self.checksample(**case4) + case5 = {"col": "c8"} + self.checksample(**case5) + case6 = {"col": "c9"} + self.checksample(**case6) + + # # case7~8: nested query + # case7 = {"table_expr": "(select c1 from stb1)"} + # self.checksample(**case7) + # case8 = {"table_expr": "(select sample(c1, 1) c1 from stb1 group by tbname)"} + # self.checksample(**case8) + + # case9~10: mix with tbname/ts/tag/col + # case9 = {"alias": ", tbname"} + # self.checksample(**case9) + # case10 = {"alias": ", _c0"} + # self.checksample(**case10) + case11 = {"alias": ", st1"} + self.checksample(**case11) + case12 = {"alias": ", c1"} + self.checksample(**case12) + + # case13~15: with single condition + case13 = {"condition": "where c1 <= 10"} + self.checksample(**case13) + case14 = {"condition": "where c6 in (0, 1)"} + self.checksample(**case14) + case15 = {"condition": "where c1 between 1 and 10"} + self.checksample(**case15) + + # case16: with multi-condition + case16 = {"condition": "where c6=1 or c6 =0"} + self.checksample(**case16) + + # # case17: only support normal table join + # case17 = { + # "col": "t1.c1", + # "table_expr": "t1, t2", + # "condition": "where t1.ts=t2.ts" + # } + # self.checksample(**case17) + # # case18~19: with group by + # case19 = { + # "table_expr": "stb1", + # "condition": "partition by tbname" + # } + # self.checksample(**case19) + + # # case20~21: with order by + # case20 = {"condition": "order by ts"} + # self.checksample(**case20) + # case21 = { + # "table_expr": "stb1", + # "condition": "partition by tbname order by tbname" + # } + # self.checksample(**case21) + + # case22: with union + case22 = { + "condition": "union all select sample( c1 , 1 ) from t2" + } + self.checksample(**case22) + + # case23: 
with limit/slimit + case23 = { + "condition": "limit 1" + } + self.checksample(**case23) + + # case24: value k range [1, 1000], can be int or float, k = floor(k) + case24 = {"k": 3} + self.checksample(**case24) + case25 = {"k": 2.999} + self.checksample(**case25) + case26 = {"k": 1000} + self.checksample(**case26) + case27 = { + "table_expr": "stb1", + "condition": "group by tbname slimit 1 " + } + self.checksample(**case27) # with slimit + case28 = { + "table_expr": "stb1", + "condition": "group by tbname slimit 1 soffset 1" + } + self.checksample(**case28) # with soffset + + pass + + def sample_error_query(self) -> None : + # unusual test + + # form test + err1 = {"col": ""} + self.checksample(**err1) # no col + err2 = {"sel": ""} + self.checksample(**err2) # no select + err3 = {"func": "sample", "col": "", "m_comm": "", "k": "", "r_comm": ""} + self.checksample(**err3) # no sample condition: select sample from + err4 = {"col": "", "m_comm": "", "k": ""} + self.checksample(**err4) # no sample condition: select sample() from + err5 = {"func": "sample", "r_comm": ""} + self.checksample(**err5) # no brackets: select sample col, k from + err6 = {"fr": ""} + self.checksample(**err6) # no from + err7 = {"k": ""} + self.checksample(**err7) # no k + err8 = {"table_expr": ""} + self.checksample(**err8) # no table_expr + + # err9 = {"col": "st1"} + # self.checksample(**err9) # col: tag + tdSql.query(" select sample(st1 ,1) from t1 ") + err10 = {"col": 1} + self.checksample(**err10) # col: value + err11 = {"col": "NULL"} + self.checksample(**err11) # col: NULL + err12 = {"col": "%_"} + self.checksample(**err12) # col: %_ + err13 = {"col": "c3"} + self.checksample(**err13) # col: timestamp col + err14 = {"col": "_c0"} + # self.checksample(**err14) # col: Primary key + err15 = {"col": "avg(c1)"} + # self.checksample(**err15) # expr col + err16 = {"col": "c4"} + self.checksample(**err16) # binary col + err17 = {"col": "c10"} + self.checksample(**err17) # nchar col + err18 = {"col": "c6"} + self.checksample(**err18) # bool col + err19 = {"col": "'c1'"} + self.checksample(**err19) # col: string + err20 = {"col": None} + self.checksample(**err20) # col: None + err21 = {"col": "''"} + self.checksample(**err21) # col: '' + err22 = {"col": "tt1.c1"} + self.checksample(**err22) # not table_expr col + err23 = {"col": "t1"} + self.checksample(**err23) # tbname + err24 = {"col": "stb1"} + self.checksample(**err24) # stbname + err25 = {"col": "db"} + self.checksample(**err25) # database name + err26 = {"col": "True"} + self.checksample(**err26) # col: BOOL 1 + err27 = {"col": True} + self.checksample(**err27) # col: BOOL 2 + err28 = {"col": "*"} + self.checksample(**err28) # col: all col + err29 = {"func": "sample[", "r_comm": "]"} + self.checksample(**err29) # form: sample[col, k] + err30 = {"func": "sample{", "r_comm": "}"} + self.checksample(**err30) # form: sample{col, k} + err31 = {"col": "[c1]"} + self.checksample(**err31) # form: sample([col], k) + err32 = {"col": "c1, c2"} + self.checksample(**err32) # form: sample(col, col2, k) + err33 = {"col": "c1, 2"} + self.checksample(**err33) # form: sample(col, k1, k2) + err34 = {"alias": ", count(c1)"} + self.checksample(**err34) # mix with aggregate function 1 + err35 = {"alias": ", avg(c1)"} + self.checksample(**err35) # mix with aggregate function 2 + err36 = {"alias": ", min(c1)"} + self.checksample(**err36) # mix with select function 1 + err37 = {"alias": ", top(c1, 5)"} + self.checksample(**err37) # mix with select function 2 + err38 = {"alias": ", 
spread(c1)"} + self.checksample(**err38) # mix with calculation function 1 + err39 = {"alias": ", diff(c1)"} + self.checksample(**err39) # mix with calculation function 2 + # err40 = {"alias": "+ 2"} + # self.checksample(**err40) # mix with arithmetic 1 + # tdSql.query(" select sample(c1 , 1) + 2 from t1 ") + err41 = {"alias": "+ avg(c1)"} + self.checksample(**err41) # mix with arithmetic 2 + err42 = {"alias": ", c1"} + self.checksample(**err42) # mix with other col + # err43 = {"table_expr": "stb1"} + # self.checksample(**err43) # select stb directly + err44 = { + "col": "stb1.c1", + "table_expr": "stb1, stb2", + "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" + } + self.checksample(**err44) # stb join + err45 = { + "condition": "where ts>0 and ts < now interval(1h) fill(next)" + } + self.checksample(**err45) # interval + err46 = { + "table_expr": "t1", + "condition": "group by c6" + } + # self.checksample(**err46) # group by normal col + + err49 = {"k": "2021-01-01 00:00:00.000"} + self.checksample(**err49) # k: timestamp + err50 = {"k": False} + self.checksample(**err50) # k: False + err51 = {"k": "%"} + self.checksample(**err51) # k: special char + err52 = {"k": ""} + self.checksample(**err52) # k: "" + err53 = {"k": None} + self.checksample(**err53) # k: None + err54 = {"k": "NULL"} + self.checksample(**err54) # k: null + err55 = {"k": "binary(4)"} + self.checksample(**err55) # k: string + err56 = {"k": "c1"} + self.checksample(**err56) # k: sring,col name + err57 = {"col": "c1, 1, c2"} + self.checksample(**err57) # form: sample(col1, k1, col2, k2) + err58 = {"col": "c1 cc1"} + self.checksample(**err58) # form: sample(col newname, k) + err59 = {"k": "'1'"} + # self.checksample(**err59) # formL sample(colm, "1") + err60 = {"k": "-1-(-2)"} + # self.checksample(**err60) # formL sample(colm, -1-2) + err61 = {"k": 1001} + self.checksample(**err61) # k: right out of [1, 1000] + err62 = {"k": -1} + self.checksample(**err62) # k: negative number + err63 = {"k": 0} + self.checksample(**err63) # k: 0 + err64 = {"k": 2**63-1} + self.checksample(**err64) # k: max(bigint) + err65 = {"k": 1-2**63} + # self.checksample(**err65) # k: min(bigint) + err66 = {"k": -2**63} + self.checksample(**err66) # k: NULL + err67 = {"k": 0.999999} + self.checksample(**err67) # k: left out of [1, 1000] + + pass + + def sample_test_data(self, tbnum:int, data_row:int, basetime:int) -> None : + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def sample_test_table(self,tbnum: int) -> None : + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 
float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + pass + + + def check_sample(self , sample_query , origin_query ): + + tdSql.query(origin_query) + + origin_datas = tdSql.queryResult + + tdSql.query(sample_query) + + sample_datas = tdSql.queryResult + status = True + for ind , sample_data in enumerate(sample_datas): + if sample_data not in origin_datas: + status = False + + if status: + tdLog.info(" sample data is in datas groups ,successed sql is : %s" % sample_query ) + else: + tdLog.exit(" sample data is not in datas groups ,failed sql is : %s" % sample_query ) + + + def basic_sample_query(self): + tdSql.execute(" drop database if exists db ") + tdSql.execute(" create database if not exists db days 300 ") + tdSql.execute(" use db ") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + for i in range(9): + tdSql.execute( + f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute( + f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + + tdSql.execute( + f'''insert into t1 values + ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) + ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) + ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a ) + ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a ) + ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a ) + ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a 
) + ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ''' + ) + + # basic query for sample + + # params test for all + tdSql.error(" select sample(c1,c1) from t1 ") + tdSql.error(" select sample(c1,now) from t1 ") + tdSql.error(" select sample(c1,tbname) from t1 ") + tdSql.error(" select sample(c1,ts) from t1 ") + tdSql.error(" select sample(c1,false) from t1 ") + tdSql.error(" select sample(123,1) from t1 ") + + tdSql.query(" select sample(c1,2) from t1 ") + tdSql.checkRows(2) + tdSql.query(" select sample(c1,10) from t1 ") + tdSql.checkRows(9) + tdSql.query(" select sample(c8,10) from t1 ") + tdSql.checkRows(9) + tdSql.query(" select sample(c1,999) from t1 ") + tdSql.checkRows(9) + tdSql.query(" select sample(c1,1000) from t1 ") + tdSql.checkRows(9) + tdSql.query(" select sample(c8,1000) from t1 ") + tdSql.checkRows(9) + tdSql.error(" select sample(c1,-1) from t1 ") + + # bug need fix + # tdSql.query("select sample(c1 ,2) , 123 from stb1;") + + # all type support + tdSql.query(" select sample(c1 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c2 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c3 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c4 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c5 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c6 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c7 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c8 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c9 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c10 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(t1 , 20 ) from ct1 ") + tdSql.checkRows(13) + # filter data + + tdSql.query(" select sample(c1, 20 ) from t1 where c1 is null ") + tdSql.checkRows(0) + + tdSql.query(" select sample(c1, 20 ) from t1 where c1 =6 ") + tdSql.checkRows(1) + + tdSql.query(" select sample(c1, 20 ) from t1 where c1 > 6 ") + tdSql.checkRows(3) + + self.check_sample("select sample(c1, 20 ) from t1 where c1 > 6" , "select c1 from t1 where c1 > 6") + + tdSql.query(" select sample( c1 , 1 ) from t1 where c1 in (0, 1,2) ") + tdSql.checkRows(1) + + tdSql.query("select sample( c1 ,3 ) from t1 where c1 between 1 and 10 ") + tdSql.checkRows(3) + + self.check_sample("select sample( c1 ,3 ) from t1 where c1 between 1 and 10" ,"select c1 from t1 where c1 between 1 and 10") + + # join + + tdSql.query("select sample( ct4.c1 , 1 ) from ct1, ct4 where ct4.ts=ct1.ts") + + # partition by tbname + + tdSql.query("select sample(c1,2) from stb1 partition by tbname") + tdSql.checkRows(4) + + self.check_sample("select sample(c1,2) from stb1 partition by tbname" , "select c1 from stb1 partition by tbname") + + # nest query + # tdSql.query("select sample(c1,2) from (select c1 from t1); ") + # tdSql.checkRows(2) + + # union all + tdSql.query("select sample(c1,2) from t1 union all select sample(c1,3) from t1") + tdSql.checkRows(5) + + # fill interval + + # not support mix with other function + 
tdSql.error("select top(c1,2) , sample(c1,2) from ct1") + tdSql.error("select max(c1) , sample(c1,2) from ct1") + tdSql.error("select c1 , sample(c1,2) from ct1") + + # bug for mix with scalar + # tdSql.error("select 123 , sample(c1,100) from ct1") + # tdSql.error("select sample(c1,100)+2 from ct1") + # tdSql.error("select abs(sample(c1,100)) from ct1") + + def sample_test_run(self) : + tdLog.printNoPrefix("==========TD-10594==========") + tbnum = 10 + nowtime = int(round(time.time() * 1000)) + per_table_rows = 10 + self.sample_test_table(tbnum) + + tdLog.printNoPrefix("######## no data test:") + self.sample_current_query() + self.sample_error_query() + + tdLog.printNoPrefix("######## insert only NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + self.sample_current_query() + self.sample_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") + # self.sample_test_table(tbnum) + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + # self.sample_current_query() + # self.sample_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") + # self.sample_test_table(tbnum) + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") + # self.sample_current_query() + # self.sample_error_query() + + tdLog.printNoPrefix("######## insert data without NULL data test:") + self.sample_test_table(tbnum) + self.sample_test_data(tbnum, per_table_rows, nowtime) + self.sample_current_query() + self.sample_error_query() + + + tdLog.printNoPrefix("######## insert data mix with NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + self.sample_current_query() + self.sample_error_query() + + + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + self.sample_current_query() + self.sample_error_query() + + self.basic_sample_query() + + def run(self): + import traceback + try: + # run in develop branch + self.sample_test_run() + pass + except Exception as e: + traceback.print_exc() + raise e + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/2-query/spread.py b/tests/system-test/2-query/spread.py new file mode 100644 index 0000000000000000000000000000000000000000..d2dbbd03ede83d65ee475db23da144c2c4d6f4e7 --- /dev/null +++ b/tests/system-test/2-query/spread.py @@ -0,0 +1,358 @@ +import datetime + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes 
import * + +PRIMARY_COL = "ts" + +INT_COL = "c1" +BINT_COL = "c2" +SINT_COL = "c3" +TINT_COL = "c4" +FLOAT_COL = "c5" +DOUBLE_COL = "c6" +BOOL_COL = "c7" + +BINARY_COL = "c8" +NCHAR_COL = "c9" +TS_COL = "c10" + +NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [ BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [ BOOL_COL, ] +TS_TYPE_COL = [ TS_COL, ] + +ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL ] + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor()) + + def __query_condition(self,tbname): + query_condition = [f"cast({col} as bigint)" for col in ALL_COL] + for num_col in NUM_COL: + query_condition.extend( + ( + f"{tbname}.{num_col}", + f"abs( {tbname}.{num_col} )", + f"acos( {tbname}.{num_col} )", + f"asin( {tbname}.{num_col} )", + f"atan( {tbname}.{num_col} )", + f"avg( {tbname}.{num_col} )", + f"ceil( {tbname}.{num_col} )", + f"cos( {tbname}.{num_col} )", + f"count( {tbname}.{num_col} )", + f"floor( {tbname}.{num_col} )", + f"log( {tbname}.{num_col}, {tbname}.{num_col})", + f"max( {tbname}.{num_col} )", + f"min( {tbname}.{num_col} )", + f"pow( {tbname}.{num_col}, 2)", + f"round( {tbname}.{num_col} )", + f"sum( {tbname}.{num_col} )", + f"sin( {tbname}.{num_col} )", + f"sqrt( {tbname}.{num_col} )", + f"tan( {tbname}.{num_col} )", + f"cast( {tbname}.{num_col} as timestamp)", + ) + ) + [ query_condition.append(f"{num_col} + {any_col}") for any_col in ALL_COL ] + for char_col in CHAR_COL: + query_condition.extend( + ( + f"count({tbname}.{char_col})", + f"sum(cast({tbname}.{char_col} as bigint))", + f"max(cast({tbname}.{char_col} as bigint))", + f"min(cast({tbname}.{char_col} as bigint))", + f"avg(cast({tbname}.{char_col} as bigint))", + ) + ) + query_condition.extend( + ( + 1010, + ) + ) + + return query_condition + + def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False): + table_reference = tb_list[0] + join_condition = table_reference + join = "inner join" if INNER else "join" + for i in range(len(tb_list[1:])): + join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}" + + return join_condition + + def __where_condition(self, col=None, tbname=None, query_conditon=None): + if query_conditon and isinstance(query_conditon, str): + if query_conditon.startswith("count"): + query_conditon = query_conditon[6:-1] + elif query_conditon.startswith("max"): + query_conditon = query_conditon[4:-1] + elif query_conditon.startswith("sum"): + query_conditon = query_conditon[4:-1] + elif query_conditon.startswith("min"): + query_conditon = query_conditon[4:-1] + + if query_conditon: + return f" where {query_conditon} is not null" + if col in NUM_COL: + return f" where abs( {tbname}.{col} ) >= 0" + if col in CHAR_COL: + return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' " + if col in BOOLEAN_COL: + return f" where {tbname}.{col} in (false, true) " + if col in TS_TYPE_COL or col in PRIMARY_COL: + return f" where cast( {tbname}.{col} as binary(16) ) is not null " + + return "" + + def __group_condition(self, col, having = None): + if isinstance(col, str): + if col.startswith("count"): + col = col[6:-1] + elif col.startswith("max"): + col = col[4:-1] + elif col.startswith("sum"): + col = col[4:-1] + elif col.startswith("min"): + col = col[4:-1] + return f" group by {col} having {having}" if having else f" group by {col} " + + def 
__single_sql(self, select_clause, from_clause, where_condition="", group_condition=""): + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]: + return + return f"select spread({select_clause}) from {from_clause} {where_condition} {group_condition}" + + @property + def __tb_list(self): + return [ + "ct1", + "ct4", + "t1", + "ct2", + "stb1", + ] + + def sql_list(self): + sqls = [] + __no_join_tblist = self.__tb_list + for tb in __no_join_tblist: + select_claus_list = self.__query_condition(tb) + for select_claus in select_claus_list: + group_claus = self.__group_condition(col=select_claus) + where_claus = self.__where_condition(query_conditon=select_claus) + having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null") + sqls.extend( + ( + self.__single_sql(select_claus, tb, where_claus, having_claus), + self.__single_sql(select_claus, tb,), + self.__single_sql(select_claus, tb, where_condition=where_claus), + self.__single_sql(select_claus, tb, group_condition=group_claus), + ) + ) + + # return filter(None, sqls) + return list(filter(None, sqls)) + + def __get_type(self, col): + if tdSql.cursor.istype(col, "BOOL"): + return "BOOL" + if tdSql.cursor.istype(col, "INT"): + return "INT" + if tdSql.cursor.istype(col, "BIGINT"): + return "BIGINT" + if tdSql.cursor.istype(col, "TINYINT"): + return "TINYINT" + if tdSql.cursor.istype(col, "SMALLINT"): + return "SMALLINT" + if tdSql.cursor.istype(col, "FLOAT"): + return "FLOAT" + if tdSql.cursor.istype(col, "DOUBLE"): + return "DOUBLE" + if tdSql.cursor.istype(col, "BINARY"): + return "BINARY" + if tdSql.cursor.istype(col, "NCHAR"): + return "NCHAR" + if tdSql.cursor.istype(col, "TIMESTAMP"): + return "TIMESTAMP" + if tdSql.cursor.istype(col, "JSON"): + return "JSON" + if tdSql.cursor.istype(col, "TINYINT UNSIGNED"): + return "TINYINT UNSIGNED" + if tdSql.cursor.istype(col, "SMALLINT UNSIGNED"): + return "SMALLINT UNSIGNED" + if tdSql.cursor.istype(col, "INT UNSIGNED"): + return "INT UNSIGNED" + if tdSql.cursor.istype(col, "BIGINT UNSIGNED"): + return "BIGINT UNSIGNED" + + def spread_check(self): + sqls = self.sql_list() + tdLog.printNoPrefix("===step 1: current case, must return query OK") + for i in range(len(sqls)): + tdLog.info(f"sql: {sqls[i]}") + tdSql.query(sqls[i]) + + def __test_current(self): + tdSql.query("select spread(ts) from ct1") + tdSql.checkRows(1) + tdSql.query("select spread(c1) from ct2") + tdSql.checkRows(1) + tdSql.query("select spread(c1) from ct4 group by c1") + tdSql.checkRows(self.rows + 3) + tdSql.query("select spread(c1) from ct4 group by c7") + tdSql.checkRows(3) + tdSql.query("select spread(ct2.c1) from ct4 join ct2 on ct4.ts=ct2.ts") + tdSql.checkRows(1) + + self.spread_check() + + def __test_error(self): + + tdLog.printNoPrefix("===step 0: err case, must return err") + tdSql.error( "select spread() from ct1" ) + tdSql.error( "select spread(1, 2) from ct2" ) + tdSql.error( f"select spread({NUM_COL[0]}, {NUM_COL[1]}) from ct4" ) + tdSql.error( f"select spread({BOOLEAN_COL[0]}) from t1" ) + tdSql.error( f"select spread({CHAR_COL[0]}) from stb1" ) + + # tdSql.error( ''' select spread(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10']) + # from ct1 + # where ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null + # group by ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 
+ c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] + # having ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null ''' ) + # tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ") + + def all_test(self): + self.__test_error() + self.__test_current() + + def __create_tb(self): + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) 
}, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/sum.py b/tests/system-test/2-query/sum.py index 9521b005ddcfebedc08c4b618c936726ec4e3c85..ea0e1f7fae214fa009e7230b524959b7afef59da 100644 --- a/tests/system-test/2-query/sum.py +++ b/tests/system-test/2-query/sum.py @@ -218,13 +218,13 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - # tdDnodes.stop(1) - # tdDnodes.start(1) + tdDnodes.stop(1) + tdDnodes.start(1) - # tdSql.execute("use db") + tdSql.execute("use db") - # tdLog.printNoPrefix("==========step4:after wal, all check again ") - # self.all_test() + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() def stop(self): tdSql.close() diff --git a/tests/system-test/2-query/top.py b/tests/system-test/2-query/top.py new file mode 100644 index 0000000000000000000000000000000000000000..fbbbb2c99acc2cce1b0cb53a0dafd7f18ec01011 --- /dev/null +++ b/tests/system-test/2-query/top.py @@ -0,0 +1,106 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + + + tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') + tdSql.execute("create table test1 using test tags('beijing')") + for i in range(self.rowNum): + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + + # top verification + # invalid cases: k must be within [1, 100] and the target column must be numeric (ts/bool/binary/nchar are rejected) + tdSql.error("select top(ts, 10) from test") + tdSql.error("select top(col1, 0) from test") + tdSql.error("select top(col1, 101) from test") + tdSql.error("select top(col2, 0) from test") + tdSql.error("select top(col2, 101) from test") + tdSql.error("select top(col3, 0) from test") + tdSql.error("select top(col3, 101) from test") + tdSql.error("select top(col4, 0) from test") + tdSql.error("select top(col4, 101) from test") + tdSql.error("select top(col5, 0) from test") + tdSql.error("select top(col5, 101) from test") + tdSql.error("select top(col6, 0) from test") + tdSql.error("select top(col6, 101) from test") + tdSql.error("select top(col7, 10) from test") + tdSql.error("select top(col8, 10) from test") + tdSql.error("select top(col9, 10) from test") + tdSql.error("select top(col11, 0) from test") + tdSql.error("select top(col11, 101) from test") + tdSql.error("select top(col12, 0) from test") + tdSql.error("select top(col12, 101) from test") + tdSql.error("select top(col13, 0) from test") + tdSql.error("select top(col13, 101) from test") + tdSql.error("select top(col14, 0) from test") + tdSql.error("select top(col14, 101) from test") + + tdSql.query("select top(col1, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) + tdSql.query("select top(col2, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) + tdSql.query("select top(col3, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) + tdSql.query("select top(col4, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) + tdSql.query("select top(col11, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) + tdSql.query("select top(col12, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) + tdSql.query("select top(col13, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) + tdSql.query("select top(col14, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) + tdSql.query("select ts,top(col1, 2),ts from test1") + tdSql.checkRows(2) + tdSql.query("select top(col14, 100) from test") + tdSql.checkRows(10)
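 + # a projection mixing top() with plain ts columns, grouped by tbname, should still return the top k rows + tdSql.query("select ts,top(col1, 2),ts from test group by 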
tbname") + tdSql.checkRows(2) + tdSql.query('select top(col2,1) from test interval(1y) order by col2') + tdSql.checkData(0,0,10) + + tdSql.error("select * from test where bottom(col2,1)=1") + tdSql.error("select top(col14, 0) from test;") + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/union.py b/tests/system-test/2-query/union.py index 935e91afdbd0ec0417222acc0d85d99799aed545..88767ab888c9bfe11c329eecd41f78442436cafb 100644 --- a/tests/system-test/2-query/union.py +++ b/tests/system-test/2-query/union.py @@ -35,8 +35,6 @@ class TDTestCase: for char_col in CHAR_COL: query_condition.extend( ( - f"rtrim( {tbname}.{char_col} )", - f"substr( {tbname}.{char_col}, 1 )", f"count( {tbname}.{char_col} )", f"cast( {tbname}.{char_col} as nchar(3) )", ) @@ -45,11 +43,7 @@ class TDTestCase: for num_col in NUM_COL: query_condition.extend( ( - f"{tbname}.{num_col}", - f"floor( {tbname}.{num_col} )", f"log( {tbname}.{num_col}, {tbname}.{num_col})", - f"sin( {tbname}.{num_col} )", - f"sqrt( {tbname}.{num_col} )", ) ) @@ -96,7 +90,6 @@ class TDTestCase: return "" - def __group_condition(self, col, having = None): if isinstance(col, str): if col.startswith("count"): @@ -114,15 +107,10 @@ class TDTestCase: return return f"select {select_clause} from {from_clause} {where_condition} {group_condition}" - @property def __join_tblist(self): return [ - ["ct1", "ct2"], - ["ct1", "ct4"], ["ct1", "t1"], - ["ct2", "ct4"], - ["ct2", "t1"], ["ct4", "t1"], # ["ct1", "ct2", "ct4"], # ["ct1", "ct2", "t1"], @@ -135,9 +123,7 @@ class TDTestCase: def __tb_liast(self): return [ "ct1", - "ct2", "ct4", - "t1", ] def sql_list(self): @@ -152,15 +138,7 @@ class TDTestCase: having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null") sqls.extend( ( - self.__single_sql(select_claus, join_tb, where_claus, group_claus), - self.__single_sql(select_claus, join_tb, where_claus, having_claus), - self.__single_sql(select_claus, self.__join_condition(join_tblist), where_claus, having_claus), self.__single_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus), - self.__single_sql(select_claus, join_tb, where_claus), - self.__single_sql(select_claus, join_tb, having_claus), - self.__single_sql(select_claus, join_tb, group_claus), - self.__single_sql(select_claus, join_tb), - ) ) __no_join_tblist = self.__tb_liast @@ -172,12 +150,7 @@ class TDTestCase: having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null") sqls.extend( ( - self.__single_sql(select_claus, join_tb, where_claus, group_claus), - self.__single_sql(select_claus, join_tb, where_claus, having_claus), - self.__single_sql(select_claus, join_tb, where_claus), - self.__single_sql(select_claus, join_tb, group_claus), - self.__single_sql(select_claus, join_tb, having_claus), - self.__single_sql(select_claus, join_tb), + self.__single_sql(select_claus, tb, where_claus, having_claus), ) ) @@ -221,6 +194,8 @@ class TDTestCase: for i in range(len(sqls)): tdSql.query(sqls[i]) res1_type = self.__get_type(0) + # if i % 5 == 0: + # tdLog.success(f"{i} : sql is already executing!") for j in range(len(sqls[i:])): tdSql.query(sqls[j+i]) order_union_type = False @@ -246,22 +221,12 @@ class TDTestCase: rev_order_type = True if all_union_type: - tdSql.query(f"{sqls[i]} union {sqls[j+i]}") - tdSql.query(f"{sqls[j+i]} union 
{sqls[i]}") - tdSql.checkCols(1) - tdSql.query(f"{sqls[i]} union all {sqls[j+i]}") - tdSql.query(f"{sqls[j+i]} union all {sqls[i]}") - tdSql.checkCols(1) + tdSql.execute(f"{sqls[i]} union {sqls[j+i]}") + tdSql.execute(f"{sqls[j+i]} union all {sqls[i]}") elif order_union_type: - tdSql.query(f"{sqls[i]} union {sqls[j+i]}") - tdSql.checkCols(1) - tdSql.query(f"{sqls[i]} union all {sqls[j+i]}") - tdSql.checkCols(1) + tdSql.execute(f"{sqls[i]} union all {sqls[j+i]}") elif rev_order_type: - tdSql.query(f"{sqls[j+i]} union {sqls[i]}") - tdSql.checkCols(1) - tdSql.query(f"{sqls[j+i]} union all {sqls[i]}") - tdSql.checkCols(1) + tdSql.execute(f"{sqls[j+i]} union {sqls[i]}") else: tdSql.error(f"{sqls[i]} union {sqls[j+i]}") @@ -273,7 +238,7 @@ class TDTestCase: tdSql.error( "select c1 from ct1 union all drop table ct3" ) tdSql.error( "select c1 from ct1 union all '' " ) tdSql.error( " '' union all select c1 from ct1 " ) - tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ") + # tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ") def all_test(self): self.__test_error() diff --git a/tests/system-test/2-query/union1.py b/tests/system-test/2-query/union1.py new file mode 100644 index 0000000000000000000000000000000000000000..ea6940246e6fed6b9a2c8512f69fde4d3d3a6d70 --- /dev/null +++ b/tests/system-test/2-query/union1.py @@ -0,0 +1,370 @@ +import datetime + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +PRIMARY_COL = "ts" + +INT_COL = "c1" +BINT_COL = "c2" +SINT_COL = "c3" +TINT_COL = "c4" +FLOAT_COL = "c5" +DOUBLE_COL = "c6" +BOOL_COL = "c7" + +BINARY_COL = "c8" +NCHAR_COL = "c9" +TS_COL = "c10" + +NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [ BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [ BOOL_COL, ] +TS_TYPE_COL = [ TS_COL, ] + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def __query_condition(self,tbname): + query_condition = [] + for char_col in CHAR_COL: + query_condition.extend( + ( + f"count( {tbname}.{char_col} )", + f"cast( {tbname}.{char_col} as nchar(3) )", + ) + ) + + for num_col in NUM_COL: + query_condition.extend( + ( + f"log( {tbname}.{num_col}, {tbname}.{num_col})", + ) + ) + + query_condition.extend( + ( + ''' "test12" ''', + # 1010, + ) + ) + + return query_condition + + def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False): + table_reference = tb_list[0] + join_condition = table_reference + join = "inner join" if INNER else "join" + for i in range(len(tb_list[1:])): + join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}" + + return join_condition + + def __where_condition(self, col=None, tbname=None, query_conditon=None): + if query_conditon and isinstance(query_conditon, str): + if query_conditon.startswith("count"): + query_conditon = query_conditon[6:-1] + elif query_conditon.startswith("max"): + query_conditon = query_conditon[4:-1] + elif query_conditon.startswith("sum"): + query_conditon = query_conditon[4:-1] + elif query_conditon.startswith("min"): + query_conditon = query_conditon[4:-1] + + + if query_conditon: + return f" where {query_conditon} is not null" + if col in NUM_COL: + return f" where abs( {tbname}.{col} ) >= 0" + if col in CHAR_COL: + return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' " + if col in BOOLEAN_COL: + return 
f" where {tbname}.{col} in (false, true) " + if col in TS_TYPE_COL or col in PRIMARY_COL: + return f" where cast( {tbname}.{col} as binary(16) ) is not null " + + return "" + + def __group_condition(self, col, having = None): + if isinstance(col, str): + if col.startswith("count"): + col = col[6:-1] + elif col.startswith("max"): + col = col[4:-1] + elif col.startswith("sum"): + col = col[4:-1] + elif col.startswith("min"): + col = col[4:-1] + return f" group by {col} having {having}" if having else f" group by {col} " + + def __single_sql(self, select_clause, from_clause, where_condition="", group_condition=""): + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]: + return + return f"select {select_clause} from {from_clause} {where_condition} {group_condition}" + + @property + def __join_tblist(self): + return [ + ["ct1", "ct2"], + # ["ct1", "ct2", "ct4"], + # ["ct1", "ct2", "t1"], + # ["ct1", "ct4", "t1"], + # ["ct2", "ct4", "t1"], + # ["ct1", "ct2", "ct4", "t1"], + ] + + @property + def __tb_liast(self): + return [ + "t1", + "stb1", + ] + + def sql_list(self): + sqls = [] + __join_tblist = self.__join_tblist + for join_tblist in __join_tblist: + for join_tb in join_tblist: + select_claus_list = self.__query_condition(join_tb) + for select_claus in select_claus_list: + group_claus = self.__group_condition( col=select_claus) + where_claus = self.__where_condition(query_conditon=select_claus) + having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null") + sqls.extend( + ( + self.__single_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus), + ) + ) + __no_join_tblist = self.__tb_liast + for tb in __no_join_tblist: + select_claus_list = self.__query_condition(tb) + for select_claus in select_claus_list: + group_claus = self.__group_condition(col=select_claus) + where_claus = self.__where_condition(query_conditon=select_claus) + having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null") + sqls.extend( + ( + self.__single_sql(select_claus, tb, where_claus, having_claus), + ) + ) + + # return filter(None, sqls) + return list(filter(None, sqls)) + + def __get_type(self, col): + if tdSql.cursor.istype(col, "BOOL"): + return "BOOL" + if tdSql.cursor.istype(col, "INT"): + return "INT" + if tdSql.cursor.istype(col, "BIGINT"): + return "BIGINT" + if tdSql.cursor.istype(col, "TINYINT"): + return "TINYINT" + if tdSql.cursor.istype(col, "SMALLINT"): + return "SMALLINT" + if tdSql.cursor.istype(col, "FLOAT"): + return "FLOAT" + if tdSql.cursor.istype(col, "DOUBLE"): + return "DOUBLE" + if tdSql.cursor.istype(col, "BINARY"): + return "BINARY" + if tdSql.cursor.istype(col, "NCHAR"): + return "NCHAR" + if tdSql.cursor.istype(col, "TIMESTAMP"): + return "TIMESTAMP" + if tdSql.cursor.istype(col, "JSON"): + return "JSON" + if tdSql.cursor.istype(col, "TINYINT UNSIGNED"): + return "TINYINT UNSIGNED" + if tdSql.cursor.istype(col, "SMALLINT UNSIGNED"): + return "SMALLINT UNSIGNED" + if tdSql.cursor.istype(col, "INT UNSIGNED"): + return "INT UNSIGNED" + if tdSql.cursor.istype(col, "BIGINT UNSIGNED"): + return "BIGINT UNSIGNED" + + def union_check(self): + sqls = self.sql_list() + for i in range(len(sqls)): + tdSql.query(sqls[i]) + res1_type = self.__get_type(0) + # if i % 5 == 0: + # tdLog.success(f"{i} : sql is already executing!") + for j in range(len(sqls[i:])): + tdSql.query(sqls[j+i]) + order_union_type = False + rev_order_type = 
False + all_union_type = False + res2_type = self.__get_type(0) + + if res2_type == res1_type: + all_union_type = True + elif res1_type in ( "BIGINT" , "NCHAR" ) and res2_type in ("BIGINT" , "NCHAR"): + all_union_type = True + elif res1_type in ("BIGINT", "NCHAR"): + order_union_type = True + elif res2_type in ("BIGINT", "NCHAR"): + rev_order_type = True + elif res1_type == "TIMESTAMP" and res2_type not in ("BINARY", "NCHAR"): + order_union_type = True + elif res2_type == "TIMESTAMP" and res1_type not in ("BINARY", "NCHAR"): + rev_order_type = True + elif res1_type == "BINARY" and res2_type != "NCHAR": + order_union_type = True + elif res2_type == "BINARY" and res1_type != "NCHAR": + rev_order_type = True + + if all_union_type: + tdSql.execute(f"{sqls[i]} union {sqls[j+i]}") + tdSql.execute(f"{sqls[j+i]} union all {sqls[i]}") + elif order_union_type: + tdSql.execute(f"{sqls[i]} union all {sqls[j+i]}") + elif rev_order_type: + tdSql.execute(f"{sqls[j+i]} union {sqls[i]}") + else: + tdSql.error(f"{sqls[i]} union {sqls[j+i]}") + + def __test_error(self): + + tdSql.error( "show tables union show tables" ) + tdSql.error( "create table errtb1 union all create table errtb2" ) + tdSql.error( "drop table ct1 union all drop table ct3" ) + tdSql.error( "select c1 from ct1 union all drop table ct3" ) + tdSql.error( "select c1 from ct1 union all '' " ) + tdSql.error( " '' union all select c1 from ct1 " ) + # tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ") + + def all_test(self): + self.__test_error() + self.union_check() + + + def __create_tb(self): + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { 
now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/basic5.py b/tests/system-test/7-tmq/basic5.py index 4a29cacd97b7bad3bcd469fe1ebc2b445061397a..500e8671217f5d4bb8ae0793f288791095303135 100644 --- a/tests/system-test/7-tmq/basic5.py +++ 
b/tests/system-test/7-tmq/basic5.py @@ -22,8 +22,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -186,7 +186,7 @@ class TDTestCase: time.sleep(1) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 @@ -228,7 +228,7 @@ class TDTestCase: 'stbName': 'stb', \ 'ctbNum': 10, \ 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ + 'batchNum': 200, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -300,7 +300,7 @@ class TDTestCase: time.sleep(1) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 @@ -349,8 +349,8 @@ class TDTestCase: 'vgroups': 1, \ 'stbName': 'stb', \ 'ctbNum': 10, \ - 'rowsPerTbl': 30000, \ - 'batchNum': 100, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 200, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -381,8 +381,8 @@ class TDTestCase: 'vgroups': 1, \ 'stbName': 'stb2', \ 'ctbNum': 10, \ - 'rowsPerTbl': 30000, \ - 'batchNum': 100, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 200, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict2['cfg'] = cfgPath tdSql.execute("create stable if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(parameterDict2['dbName'], parameterDict2['stbName'])) @@ -432,7 +432,7 @@ class TDTestCase: time.sleep(1) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 diff --git a/tests/system-test/7-tmq/subscribeDb.py b/tests/system-test/7-tmq/subscribeDb.py index b536a70515a38eec91a4a007a2f4850c0056e89e..157bc7928b2800c5ba68c5f1b65ec601274dc4b9 100644 --- a/tests/system-test/7-tmq/subscribeDb.py +++ b/tests/system-test/7-tmq/subscribeDb.py @@ -22,8 +22,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -93,7 +93,7 @@ class TDTestCase: tdLog.info(shellCmd) os.system(shellCmd) - def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl): + def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum): tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) tsql.execute("use %s" %dbName) tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) @@ -147,8 +147,7 @@ class TDTestCase: parameterDict["dbName"],\ parameterDict["vgroups"],\ parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"]) + parameterDict["ctbNum"]) self.insert_data(tsql,\ parameterDict["dbName"],\ @@ -168,7 +167,7 @@ class TDTestCase: 'vgroups': 4, \ 'stbName': 'stb', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -198,7 +197,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, 
showRow) @@ -237,7 +236,7 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 20 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -265,7 +264,7 @@ class TDTestCase: 'vgroups': 4, \ 'stbName': 'stb', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -299,7 +298,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 20 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -314,24 +313,24 @@ class TDTestCase: for i in range(expectRows): totalConsumeRows += resultList[i] - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + if not (totalConsumeRows >= expectrowcnt): tdLog.exit("tmq consume rows error!") tdSql.query("drop topic %s"%topicName1) tdLog.printNoPrefix("======== test case 2 end ...... ") - def tmqCase3(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 3: Produce while one consumers to subscribe one db, include 2 stb") + def tmqCase2a(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 2a: Produce while two consumers to subscribe one db, include 1 stb") tdLog.info("step 1: create database, stb, ctb and insert data") # create and start thread parameterDict = {'cfg': '', \ - 'dbName': 'db3', \ + 'dbName': 'db2a', \ 'vgroups': 4, \ - 'stbName': 'stb', \ + 'stbName': 'stb1', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -339,110 +338,15 @@ class TDTestCase: self.initConsumerTable() tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - - parameterDict2 = {'cfg': '', \ - 'dbName': 'db3', \ - 'vgroups': 4, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - tdLog.info("create topics from db") - topicName1 = 'topic_db1' - - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] - topicList = topicName1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - # consumerId = 1 - # self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - event.wait() - - 
tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - # wait for data ready - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicName1) - - tdLog.printNoPrefix("======== test case 3 end ...... ") - - def tmqCase4(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 4: Produce while two consumers to subscribe one db, include 2 stb") - tdLog.info("step 1: create database, stb, ctb and insert data") - # create and start thread - parameterDict = {'cfg': '', \ - 'dbName': 'db4', \ - 'vgroups': 4, \ - 'stbName': 'stb', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.initConsumerTable() - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - - parameterDict2 = {'cfg': '', \ - 'dbName': 'db4', \ - 'vgroups': 4, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - tdLog.info("create topics from db") topicName1 = 'topic_db1' tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] topicList = topicName1 ifcheckdata = 0 ifManualCommit = 1 @@ -451,101 +355,25 @@ class TDTestCase: auto.commit.interval.ms:6000,\ auto.offset.reset:earliest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - event.wait() - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - # wait for data ready - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicName1) - - tdLog.printNoPrefix("======== test case 4 end ...... 
") - - def tmqCase5(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 5: Produce while two consumers to subscribe one db, firstly create one stb, after start consume create other stb") - tdLog.info("step 1: create database, stb, ctb and insert data") - # create and start thread - parameterDict = {'cfg': '', \ - 'dbName': 'db5', \ - 'vgroups': 4, \ - 'stbName': 'stb', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - self.initConsumerTable() - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - - parameterDict2 = {'cfg': '', \ - 'dbName': 'db5', \ - 'vgroups': 4, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - tdLog.info("create topics from db") - topicName1 = 'topic_db1' - - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) - - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] - topicList = topicName1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ + consumerId = 1 + keyList = 'group.id:cgrp2,\ enable.auto.commit:false,\ auto.commit.interval.ms:6000,\ auto.offset.reset:earliest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - event.wait() - tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 - showRow = 1 + showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() # wait for data ready - prepareEnvThread.join() - prepareEnvThread2.join() + prepareEnvThread.join() tdLog.info("insert process end, and start to check consume result") expectRows = 2 @@ -554,24 +382,24 @@ class TDTestCase: for i in range(expectRows): totalConsumeRows += resultList[i] - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + if totalConsumeRows != expectrowcnt * 2: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) tdLog.exit("tmq consume rows error!") tdSql.query("drop topic %s"%topicName1) - tdLog.printNoPrefix("======== test case 5 end ...... ") + tdLog.printNoPrefix("======== test case 2a end ...... 
") - def tmqCase6(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 6: Produce while one consumers to subscribe tow topic, Each contains one db") + def tmqCase3(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 3: Produce while one consumers to subscribe one db, include 2 stb") tdLog.info("step 1: create database, stb, ctb and insert data") # create and start thread parameterDict = {'cfg': '', \ - 'dbName': 'db60', \ + 'dbName': 'db3', \ 'vgroups': 4, \ 'stbName': 'stb', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -584,30 +412,26 @@ class TDTestCase: prepareEnvThread.start() parameterDict2 = {'cfg': '', \ - 'dbName': 'db61', \ + 'dbName': 'db3', \ 'vgroups': 4, \ 'stbName': 'stb2', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups'])) - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) prepareEnvThread2.start() tdLog.info("create topics from db") - topicName1 = 'topic_db60' - topicName2 = 'topic_db61' + topicName1 = 'topic_db1' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) - tdSql.execute("create topic %s as %s" %(topicName2, parameterDict2['dbName'])) + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] - topicList = topicName1 + ',' + topicName2 + topicList = topicName1 ifcheckdata = 0 ifManualCommit = 0 keyList = 'group.id:cgrp1,\ @@ -616,13 +440,13 @@ class TDTestCase: auto.offset.reset:earliest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - #consumerId = 1 - #self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + # consumerId = 1 + # self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -643,94 +467,8 @@ class TDTestCase: tdLog.exit("tmq consume rows error!") tdSql.query("drop topic %s"%topicName1) - tdSql.query("drop topic %s"%topicName2) - - tdLog.printNoPrefix("======== test case 6 end ...... 
") - - def tmqCase7(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 7: Produce while two consumers to subscribe tow topic, Each contains one db") - tdLog.info("step 1: create database, stb, ctb and insert data") - # create and start thread - parameterDict = {'cfg': '', \ - 'dbName': 'db70', \ - 'vgroups': 4, \ - 'stbName': 'stb', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.initConsumerTable() - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - - parameterDict2 = {'cfg': '', \ - 'dbName': 'db71', \ - 'vgroups': 4, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups'])) - - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - tdLog.info("create topics from db") - topicName1 = 'topic_db60' - topicName2 = 'topic_db61' - - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) - tdSql.execute("create topic %s as %s" %(topicName2, parameterDict2['dbName'])) - - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] - topicList = topicName1 + ',' + topicName2 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - event.wait() - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - # wait for data ready - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicName1) - tdSql.query("drop topic %s"%topicName2) - tdLog.printNoPrefix("======== test case 7 end ...... ") + tdLog.printNoPrefix("======== test case 3 end ...... 
") def run(self): tdSql.prepare() @@ -745,13 +483,9 @@ class TDTestCase: self.tmqCase1(cfgPath, buildPath) self.tmqCase2(cfgPath, buildPath) + self.tmqCase2a(cfgPath, buildPath) self.tmqCase3(cfgPath, buildPath) - self.tmqCase4(cfgPath, buildPath) - self.tmqCase5(cfgPath, buildPath) - self.tmqCase6(cfgPath, buildPath) - self.tmqCase7(cfgPath, buildPath) - - + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") diff --git a/tests/system-test/7-tmq/subscribeDb0.py b/tests/system-test/7-tmq/subscribeDb0.py new file mode 100644 index 0000000000000000000000000000000000000000..d6f93acfd6599c60708f0726caf26b7fec01a0f3 --- /dev/null +++ b/tests/system-test/7-tmq/subscribeDb0.py @@ -0,0 +1,515 @@ + +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def newcur(self,cfg,host,port): + user = "root" + password = "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def initConsumerTable(self,cdbName='cdb'): + tdLog.info("create consume database, and consume info table, and consume result table") + tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) + + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i 
in range(expectRows): + tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + shellCmd = 'nohup ' + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += ' --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum): + tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("use %s" %dbName) + tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname)) + for i in range(ctbNum): + sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + event.set() + tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum)) + return + + def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + t = time.time() + startTs = int(round(t * 1000)) + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + for i in range(ctbNum): + sql += " %s_%d values "%(stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + if j < rowsPerTbl - 1: + sql = "insert into %s_%d values " %(stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ 
[OK]") + return + + def prepareEnv(self, **parameterDict): + print ("input parameters:") + print (parameterDict) + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + self.create_tables(tsql,\ + parameterDict["dbName"],\ + parameterDict["vgroups"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"]) + + self.insert_data(tsql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"],\ + parameterDict["startTs"]) + return + + def tmqCase4(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 4: Produce while two consumers to subscribe one db, include 2 stb") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db4', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.initConsumerTable() + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + parameterDict2 = {'cfg': '', \ + 'dbName': 'db4', \ + 'vgroups': 4, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db1' + + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicName1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 1 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + # wait for data ready + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("insert process end, and start to check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + + tdLog.printNoPrefix("======== test case 4 end ...... 
") + + def tmqCase5(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 5: Produce while two consumers to subscribe one db, firstly create one stb, after start consume create other stb") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db5', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.initConsumerTable() + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + parameterDict2 = {'cfg': '', \ + 'dbName': 'db5', \ + 'vgroups': 4, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdLog.info("create topics from db") + topicName1 = 'topic_db1' + + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicName1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 1 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + # wait for data ready + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("insert process end, and start to check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows < expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + + tdLog.printNoPrefix("======== test case 5 end ...... 
") + + def tmqCase6(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 6: Produce while one consumers to subscribe tow topic, Each contains one db") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db60', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.initConsumerTable() + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + parameterDict2 = {'cfg': '', \ + 'dbName': 'db61', \ + 'vgroups': 4, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups'])) + + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db60' + topicName2 = 'topic_db61' + + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as %s" %(topicName2, parameterDict2['dbName'])) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicName1 + ',' + topicName2 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + #consumerId = 1 + #self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + # wait for data ready + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + tdSql.query("drop topic %s"%topicName2) + + tdLog.printNoPrefix("======== test case 6 end ...... 
") + + def tmqCase7(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 7: Produce while two consumers to subscribe tow topic, Each contains one db") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db70', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.initConsumerTable() + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + parameterDict2 = {'cfg': '', \ + 'dbName': 'db71', \ + 'vgroups': 4, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups'])) + + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db60' + topicName2 = 'topic_db61' + + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as %s" %(topicName2, parameterDict2['dbName'])) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicName1 + ',' + topicName2 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 1 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + # wait for data ready + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("insert process end, and start to check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + tdSql.query("drop topic %s"%topicName2) + + tdLog.printNoPrefix("======== test case 7 end ...... 
") + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + self.tmqCase4(cfgPath, buildPath) + self.tmqCase5(cfgPath, buildPath) + self.tmqCase6(cfgPath, buildPath) + self.tmqCase7(cfgPath, buildPath) + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/subscribeDb1.py b/tests/system-test/7-tmq/subscribeDb1.py index a00bed30e4ad680b0113d562a7c88c63a3b6af45..56db157ab849f609eb22debde6936d2de406ee06 100644 --- a/tests/system-test/7-tmq/subscribeDb1.py +++ b/tests/system-test/7-tmq/subscribeDb1.py @@ -22,8 +22,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -198,7 +198,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -276,7 +276,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -291,10 +291,9 @@ class TDTestCase: for i in range(expectRows): totalConsumeRows += resultList[i] + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") - tdLog.info("again start consume processer") self.initConsumerTable() @@ -303,12 +302,13 @@ class TDTestCase: self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) expectRows = 1 resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 + totalConsumeRows2 = 0 for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/2)) + totalConsumeRows2 += resultList[i] + + tdLog.info("firstly act consume rows: %d"%(totalConsumeRows)) + tdLog.info("secondly act consume rows: %d, expect consume rows: %d"%(totalConsumeRows2, expectrowcnt)) + if totalConsumeRows + totalConsumeRows2 != expectrowcnt: tdLog.exit("tmq consume rows error!") tdSql.query("drop topic %s"%topicName1) @@ -354,7 +354,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 15 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -425,7 +425,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) diff --git a/tests/system-test/7-tmq/subscribeStb.py 
b/tests/system-test/7-tmq/subscribeStb.py index d0463be710e77e7bf4f9f3d35e66a2acddee42a3..2b7f0d3d5ff06ea0c36f9768c3a7f6d3eae715a0 100644 --- a/tests/system-test/7-tmq/subscribeStb.py +++ b/tests/system-test/7-tmq/subscribeStb.py @@ -29,8 +29,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -346,1024 +346,6 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 2 end ...... ") - def tmqCase3(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 3: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db3', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 13, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - time.sleep(2) - tdLog.info("drop som child table of stb1") - dropTblNum = 4 - tdSql.query("drop table if exists %s.%s_9"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_8"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_7"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_3"%(parameterDict["dbName"], parameterDict["stbName"])) - - tdLog.info("drop some child tables, then start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - remaindrowcnt = parameterDict["rowsPerTbl"] * (parameterDict["ctbNum"] - dropTblNum) - - if not (totalConsumeRows < expectrowcnt and totalConsumeRows > remaindrowcnt): - tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, remaindrowcnt, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 3 end ...... 
") - - def tmqCase4(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 4: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db4', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 4 end ...... 
") - - def tmqCase5(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 5: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db5', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != (expectrowcnt * (1 + 1/4)): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 5 end ...... 
") - - def tmqCase6(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 6: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db6', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 6 end ...... 
") - - def tmqCase7(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 7: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db7', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 7 end ...... 
") - - def tmqCase8(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 8: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db8', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows 
error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 8 end ...... ") - - def tmqCase9(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 9: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db9', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 0 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = 
self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 9 end ...... ") - - def tmqCase10(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 10: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db10', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, expectrowcnt-10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt-10000: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, 
expectrowcnt+10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 10 end ...... ") - - def tmqCase11(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 11: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db11', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - 
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 11 end ...... ") - - def tmqCase12(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 12: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db12', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 12 end ...... 
") - - def tmqCase13(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 13: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db13', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/2,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*(1/2+1/4): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4))) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act 
consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 13 end ...... ") - def run(self): tdSql.prepare() @@ -1377,17 +359,6 @@ class TDTestCase: self.tmqCase1(cfgPath, buildPath) self.tmqCase2(cfgPath, buildPath) - self.tmqCase3(cfgPath, buildPath) - self.tmqCase4(cfgPath, buildPath) - self.tmqCase5(cfgPath, buildPath) - #self.tmqCase6(cfgPath, buildPath) - #self.tmqCase7(cfgPath, buildPath) - #self.tmqCase8(cfgPath, buildPath) - #self.tmqCase9(cfgPath, buildPath) - #self.tmqCase10(cfgPath, buildPath) - #self.tmqCase11(cfgPath, buildPath) - #self.tmqCase12(cfgPath, buildPath) - #self.tmqCase13(cfgPath, buildPath) def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/subscribeStb0.py b/tests/system-test/7-tmq/subscribeStb0.py new file mode 100644 index 0000000000000000000000000000000000000000..a212cf759066f4cc67bec18800e6b9581013ab0e --- /dev/null +++ b/tests/system-test/7-tmq/subscribeStb0.py @@ -0,0 +1,444 @@ + +import taos +import sys +import time +import socket +import os +import threading +from enum import Enum + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class actionType(Enum): + CREATE_DATABASE = 0 + CREATE_STABLE = 1 + CREATE_CTABLE = 2 + INSERT_DATA = 3 + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + buildPath = '' # stay defined even when taosd is not found, so run() can detect it + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def newcur(self,cfg,host,port): + user = "root" + password = "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def initConsumerTable(self,cdbName='cdb'): + tdLog.info("create consume database, and consume info table, and consume result table") + tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) + + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + def initConsumerInfoTable(self,cdbName='cdb'): + tdLog.info("drop consumeinfo table") + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("create 
table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i in range(expectRows): + tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + shellCmd = 'nohup ' + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + # the leading space keeps the valgrind options separate from the log file path + shellCmd += ' --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1): + if dropFlag == 1: + tsql.execute("drop database if exists %s"%(dbName)) + + tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tdLog.debug("complete to create database %s"%(dbName)) + return + + def create_stable(self,tsql, dbName,stbName): + tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName)) + tdLog.debug("complete to create %s.%s" %(dbName, stbName)) + return + + def create_ctables(self,tsql, dbName,stbName,ctbNum): + tsql.execute("use %s" %dbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." 
%(stbname, count ,dbname)) + for i in range(ctbNum): + sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) + return + + def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfSql = 0 + for i in range(ctbNum): + sql += " %s_%d values "%(stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + rowsOfSql += 1 + if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + rowsOfSql = 0 + if j < rowsPerTbl - 1: + sql = "insert into %s_%d values " %(stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ [OK]") + return + + def prepareEnv(self, **parameterDict): + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + + if parameterDict["actionType"] == actionType.CREATE_DATABASE: + self.create_database(tsql, parameterDict["dbName"]) + elif parameterDict["actionType"] == actionType.CREATE_STABLE: + self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"]) + elif parameterDict["actionType"] == actionType.CREATE_CTABLE: + self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + elif parameterDict["actionType"] == actionType.INSERT_DATA: + self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + else: + tdLog.exit("unsupported action: %s"%parameterDict["actionType"]) + + return + + def tmqCase3(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 3: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db3', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 30000, \ + 'batchNum': 50, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + +
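+ # start the consumer first, then drop child tables while it is polling; the final row count may legitimately land anywhere between the rows of the surviving tables and the full insert count, so the check below uses a range.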
tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + time.sleep(1.5) + tdLog.info("drop som child table of stb1") + dropTblNum = 4 + tdSql.query("drop table if exists %s.%s_1"%(parameterDict["dbName"], parameterDict["stbName"])) + tdSql.query("drop table if exists %s.%s_2"%(parameterDict["dbName"], parameterDict["stbName"])) + tdSql.query("drop table if exists %s.%s_3"%(parameterDict["dbName"], parameterDict["stbName"])) + tdSql.query("drop table if exists %s.%s_4"%(parameterDict["dbName"], parameterDict["stbName"])) + + tdLog.info("drop some child tables, then start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + remaindrowcnt = parameterDict["rowsPerTbl"] * (parameterDict["ctbNum"] - dropTblNum) + + tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, remaindrowcnt, expectrowcnt)) + if not (totalConsumeRows <= expectrowcnt and totalConsumeRows >= remaindrowcnt): + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 3 end ...... ") + + def tmqCase4(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 4: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db4', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt/4: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 1 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") 
+ self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 4 end ...... ") + + def tmqCase5(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 5: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db5', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt/4: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 1 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != (expectrowcnt * (1 + 1/4)): + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, (expectrowcnt * (1 + 1/4)))) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 5 end ...... 
") + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + self.tmqCase3(cfgPath, buildPath) + self.tmqCase4(cfgPath, buildPath) + self.tmqCase5(cfgPath, buildPath) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/subscribeStb1.py b/tests/system-test/7-tmq/subscribeStb1.py index 049b297d2df0f54e7d681c6236d942340da2d19f..92347690d9a14f35e50ac11e18c51daa7fb1f716 100644 --- a/tests/system-test/7-tmq/subscribeStb1.py +++ b/tests/system-test/7-tmq/subscribeStb1.py @@ -29,8 +29,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -183,248 +183,15 @@ class TDTestCase: return - def tmqCase1(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 1: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db1', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 100 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - time.sleep(5) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 1 end ...... 
") - - def tmqCase2(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 2: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db2', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - parameterDict2 = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db2', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict2['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_stable(tdSql, parameterDict2["dbName"], parameterDict2["stbName"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 100 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start create child tables of stb1 and stb2") - parameterDict['actionType'] = actionType.CREATE_CTABLE - parameterDict2['actionType'] = actionType.CREATE_CTABLE - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("start insert data into child tables of stb1 and stb2") - parameterDict['actionType'] = actionType.INSERT_DATA - parameterDict2['actionType'] = actionType.INSERT_DATA - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 2 end ...... 
") - - def tmqCase3(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 3: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db3', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 13, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - time.sleep(2) - tdLog.info("drop som child table of stb1") - dropTblNum = 4 - tdSql.query("drop table if exists %s.%s_9"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_8"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_7"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_3"%(parameterDict["dbName"], parameterDict["stbName"])) - - tdLog.info("drop some child tables, then start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - remaindrowcnt = parameterDict["rowsPerTbl"] * (parameterDict["ctbNum"] - dropTblNum) - - if not (totalConsumeRows < expectrowcnt and totalConsumeRows > remaindrowcnt): - tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, remaindrowcnt, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 3 end ...... 
") - - def tmqCase4(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 4: ") + def tmqCase6(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 6: ") self.initConsumerTable() # create and start thread parameterDict = {'cfg': '', \ 'actionType': 0, \ - 'dbName': 'db4', \ + 'dbName': 'db6', \ 'dropFlag': 1, \ 'vgroups': 4, \ 'replica': 1, \ @@ -461,7 +228,7 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -479,6 +246,10 @@ class TDTestCase: self.initConsumerInfoTable() consumerId = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:latest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("again start consume processor") @@ -497,17 +268,17 @@ class TDTestCase: tdSql.query("drop topic %s"%topicFromStb1) - tdLog.printNoPrefix("======== test case 4 end ...... ") + tdLog.printNoPrefix("======== test case 6 end ...... ") - def tmqCase5(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 5: ") + def tmqCase7(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 7: ") self.initConsumerTable() # create and start thread parameterDict = {'cfg': '', \ 'actionType': 0, \ - 'dbName': 'db5', \ + 'dbName': 'db7', \ 'dropFlag': 1, \ 'vgroups': 4, \ 'replica': 1, \ @@ -536,15 +307,15 @@ class TDTestCase: expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] topicList = topicFromStb1 ifcheckdata = 0 - ifManualCommit = 0 + ifManualCommit = 1 keyList = 'group.id:cgrp1,\ enable.auto.commit:false,\ auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + auto.offset.reset:latest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -556,8 +327,8 @@ class TDTestCase: for i in range(expectRows): totalConsumeRows += resultList[i] - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) tdLog.exit("tmq consume rows error!") self.initConsumerInfoTable() @@ -574,796 +345,14 @@ class TDTestCase: for i in range(expectRows): totalConsumeRows += resultList[i] - if totalConsumeRows != (expectrowcnt * (1 + 1/4)): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 5 end ...... 
") - - def tmqCase6(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 6: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db6', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 6 end ...... 
") - - def tmqCase7(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 7: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db7', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) tdLog.exit("tmq consume rows error!") tdSql.query("drop topic %s"%topicFromStb1) tdLog.printNoPrefix("======== test case 7 end ...... 
") - def tmqCase8(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 8: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db8', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows 
error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 8 end ...... ") - - def tmqCase9(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 9: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db9', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 0 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = 
self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 9 end ...... ") - - def tmqCase10(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 10: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db10', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, expectrowcnt-10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt-10000: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, 
expectrowcnt+10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 10 end ...... ") - - def tmqCase11(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 11: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db11', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - 
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 11 end ...... ") - - def tmqCase12(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 12: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db12', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 12 end ...... 
") - - def tmqCase13(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 13: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db13', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/2,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*(1/2+1/4): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4))) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act 
consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 13 end ...... ") - def run(self): tdSql.prepare() @@ -1375,19 +364,8 @@ class TDTestCase: cfgPath = buildPath + "/../sim/psim/cfg" tdLog.info("cfgPath: %s" % cfgPath) - #self.tmqCase1(cfgPath, buildPath) - #self.tmqCase2(cfgPath, buildPath) - #self.tmqCase3(cfgPath, buildPath) - #self.tmqCase4(cfgPath, buildPath) - #self.tmqCase5(cfgPath, buildPath) self.tmqCase6(cfgPath, buildPath) self.tmqCase7(cfgPath, buildPath) - self.tmqCase8(cfgPath, buildPath) - self.tmqCase9(cfgPath, buildPath) - self.tmqCase10(cfgPath, buildPath) - self.tmqCase11(cfgPath, buildPath) - self.tmqCase12(cfgPath, buildPath) - self.tmqCase13(cfgPath, buildPath) def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/subscribeStb2.py b/tests/system-test/7-tmq/subscribeStb2.py new file mode 100644 index 0000000000000000000000000000000000000000..d08adcdc8374d01a0f91dfd596b2de6521d86f84 --- /dev/null +++ b/tests/system-test/7-tmq/subscribeStb2.py @@ -0,0 +1,421 @@ + +import taos +import sys +import time +import socket +import os +import threading +from enum import Enum + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class actionType(Enum): + CREATE_DATABASE = 0 + CREATE_STABLE = 1 + CREATE_CTABLE = 2 + INSERT_DATA = 3 + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + buildPath = "" + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def newcur(self,cfg,host,port): + user = "root" + password = "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def initConsumerTable(self,cdbName='cdb'): + tdLog.info("create consume database, consume info table, and consume result table") + tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) + + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + def initConsumerInfoTable(self,cdbName='cdb'): + tdLog.info("drop consumeinfo table") +
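# drop and recreate consumeinfo so the next consumer registration starts from a clean state +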
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i in range(expectRows): + tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + shellCmd = 'nohup ' + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1): + if dropFlag == 1: + tsql.execute("drop database if exists %s"%(dbName)) + + tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tdLog.debug("complete to create database %s"%(dbName)) + return + + def create_stable(self,tsql, dbName,stbName): + tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName)) + tdLog.debug("complete to create %s.%s" %(dbName, stbName)) + return + + def create_ctables(self,tsql, dbName,stbName,ctbNum): + tsql.execute("use %s" %dbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." 
%(stbname, count ,dbname)) + for i in range(ctbNum): + sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) + return + + def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfSql = 0 + for i in range(ctbNum): + sql += " %s_%d values "%(stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + rowsOfSql += 1 + if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + rowsOfSql = 0 + if j < rowsPerTbl - 1: + sql = "insert into %s_%d values " %(stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ [OK]") + return + + def prepareEnv(self, **parameterDict): + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + + if parameterDict["actionType"] == actionType.CREATE_DATABASE: + self.create_database(tsql, parameterDict["dbName"]) + elif parameterDict["actionType"] == actionType.CREATE_STABLE: + self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"]) + elif parameterDict["actionType"] == actionType.CREATE_CTABLE: + self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + elif parameterDict["actionType"] == actionType.INSERT_DATA: + self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + else: + tdLog.exit("unsupported action: %s"%parameterDict["actionType"]) + + return + + def tmqCase8(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 8: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db8', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:latest' + self.insertConsumerInfo(consumerId, 
expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume 0 processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume 0 result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 1 processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 2 processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 and 2 result") + expectRows = 3 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt*2: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 8 end ...... 
") + + def tmqCase9(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 9: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db9', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:latest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume 0 processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume 0 result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 1 processor") + self.initConsumerInfoTable() + consumerId = 1 + ifManualCommit = 0 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 2 processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 and 2 result") + expectRows = 3 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + 
if totalConsumeRows != expectrowcnt*2: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 9 end ...... ") + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + self.tmqCase8(cfgPath, buildPath) + self.tmqCase9(cfgPath, buildPath) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/subscribeStb3.py b/tests/system-test/7-tmq/subscribeStb3.py new file mode 100644 index 0000000000000000000000000000000000000000..58e36911c1407add56a5ef023364f5925e2629b1 --- /dev/null +++ b/tests/system-test/7-tmq/subscribeStb3.py @@ -0,0 +1,607 @@ + +import taos +import sys +import time +import socket +import os +import threading +from enum import Enum + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class actionType(Enum): + CREATE_DATABASE = 0 + CREATE_STABLE = 1 + CREATE_CTABLE = 2 + INSERT_DATA = 3 + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def newcur(self,cfg,host,port): + user = "root" + password = "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def initConsumerTable(self,cdbName='cdb'): + tdLog.info("create consume database, and consume info table, and consume result table") + tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) + + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + def initConsumerInfoTable(self,cdbName='cdb'): + tdLog.info("drop consumeinfo table") + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + 
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i in range(expectRows): + tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + shellCmd = 'nohup ' + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1): + if dropFlag == 1: + tsql.execute("drop database if exists %s"%(dbName)) + + tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tdLog.debug("complete to create database %s"%(dbName)) + return + + def create_stable(self,tsql, dbName,stbName): + tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName)) + tdLog.debug("complete to create %s.%s" %(dbName, stbName)) + return + + def create_ctables(self,tsql, dbName,stbName,ctbNum): + tsql.execute("use %s" %dbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." 
%(stbname, count ,dbname)) + for i in range(ctbNum): + sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) + return + + def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfSql = 0 + for i in range(ctbNum): + sql += " %s_%d values "%(stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + rowsOfSql += 1 + if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + rowsOfSql = 0 + if j < rowsPerTbl - 1: + sql = "insert into %s_%d values " %(stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ [OK]") + return + + def prepareEnv(self, **parameterDict): + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + + if parameterDict["actionType"] == actionType.CREATE_DATABASE: + self.create_database(tsql, parameterDict["dbName"]) + elif parameterDict["actionType"] == actionType.CREATE_STABLE: + self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"]) + elif parameterDict["actionType"] == actionType.CREATE_CTABLE: + self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + elif parameterDict["actionType"] == actionType.INSERT_DATA: + self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + else: + tdLog.exit("not support's action: ", parameterDict["actionType"]) + + return + + def tmqCase10(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 10: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db10', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:latest' + self.insertConsumerInfo(consumerId, 
expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume 0 processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume 0 result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 1 processor") + self.initConsumerInfoTable() + consumerId = 1 + ifManualCommit = 1 + self.insertConsumerInfo(consumerId, expectrowcnt-10000,topicList,keyList,ifcheckdata,ifManualCommit) + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt-10000: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 2 processor") + self.initConsumerInfoTable() + consumerId = 2 + ifManualCommit = 1 + self.insertConsumerInfo(consumerId, expectrowcnt+10000,topicList,keyList,ifcheckdata,ifManualCommit) + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 and 2 result") + expectRows = 3 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt*2: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 10 end ...... 
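One detail of the keyList literals used throughout these cases: because they are single-quoted strings continued with a trailing backslash, the indentation of each continuation line becomes part of the value, so tmq_sim evidently tolerates whitespace around key names. A hedged alternative, assuming only that the consumer splits the list on ',' and ':', builds the same string without embedded spaces:

    consumeParams = {
        'group.id': 'cgrp1',
        'enable.auto.commit': 'false',
        'auto.commit.interval.ms': '6000',
        'auto.offset.reset': 'latest',
    }
    keyList = ','.join('%s:%s' % (k, v) for k, v in consumeParams.items())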
") + + def tmqCase11(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 11: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db11', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 11 end ...... 
") + + def tmqCase12(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 12: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db12', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt/4: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt/4: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 12 end ...... 
") + + def tmqCase13(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 13: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db13', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt/4: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt/2,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt*(1/2+1/4): + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4))) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 2 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 3 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act 
consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 13 end ...... ") + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + self.tmqCase10(cfgPath, buildPath) + self.tmqCase11(cfgPath, buildPath) + self.tmqCase12(cfgPath, buildPath) + self.tmqCase13(cfgPath, buildPath) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/subscribeStb4.py b/tests/system-test/7-tmq/subscribeStb4.py new file mode 100644 index 0000000000000000000000000000000000000000..d06e14479667d172a2a7cc42f8019957d131f749 --- /dev/null +++ b/tests/system-test/7-tmq/subscribeStb4.py @@ -0,0 +1,351 @@ + +import taos +import sys +import time +import socket +import os +import threading +from enum import Enum + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class actionType(Enum): + CREATE_DATABASE = 0 + CREATE_STABLE = 1 + CREATE_CTABLE = 2 + INSERT_DATA = 3 + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def newcur(self,cfg,host,port): + user = "root" + password = "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def initConsumerTable(self,cdbName='cdb'): + tdLog.info("create consume database, and consume info table, and consume result table") + tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) + + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + def initConsumerInfoTable(self,cdbName='cdb'): + tdLog.info("drop consumeinfo table") + tdSql.query("drop table if exists 
%s.consumeinfo "%(cdbName)) + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i in range(expectRows): + tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + shellCmd = 'nohup ' + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1): + if dropFlag == 1: + tsql.execute("drop database if exists %s"%(dbName)) + + tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tdLog.debug("complete to create database %s"%(dbName)) + return + + def create_stable(self,tsql, dbName,stbName): + tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName)) + tdLog.debug("complete to create %s.%s" %(dbName, stbName)) + return + + def create_ctables(self,tsql, dbName,stbName,ctbNum): + tsql.execute("use %s" %dbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." 
%(stbname, count ,dbname)) + for i in range(ctbNum): + sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) + return + + def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfSql = 0 + for i in range(ctbNum): + sql += " %s_%d values "%(stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + rowsOfSql += 1 + if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + rowsOfSql = 0 + if j < rowsPerTbl - 1: + sql = "insert into %s_%d values " %(stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ [OK]") + return + + def prepareEnv(self, **parameterDict): + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + + if parameterDict["actionType"] == actionType.CREATE_DATABASE: + self.create_database(tsql, parameterDict["dbName"]) + elif parameterDict["actionType"] == actionType.CREATE_STABLE: + self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"]) + elif parameterDict["actionType"] == actionType.CREATE_CTABLE: + self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + elif parameterDict["actionType"] == actionType.INSERT_DATA: + self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + else: + tdLog.exit("not support's action: ", parameterDict["actionType"]) + + return + + def tmqCase1(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 1: ") + + self.initConsumerTable() + + auotCtbNum = 5 + auotCtbPrefix = 'autoCtb' + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db1', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * (auotCtbNum + parameterDict["ctbNum"]) + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, 
expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + # add some new child tables using auto creating mode + time.sleep(1) + for index in range(auotCtbNum): + tdSql.query("create table %s.%s_%d using %s.%s tags(%d)"%(parameterDict["dbName"], auotCtbPrefix, index, parameterDict["dbName"], parameterDict["stbName"], index)) + + self.insert_data(tdSql,parameterDict["dbName"],auotCtbPrefix,auotCtbNum,parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 1 end ...... ") + + def tmqCase2(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 2: ") + + self.initConsumerTable() + + auotCtbNum = 10 + auotCtbPrefix = 'autoCtb' + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db2', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + self.create_stable(tdSql, parameterDict["dbName"], 'stb2') + + tdLog.info("create topics from stb1/stb2") + topicFromStb1 = 'topic_stb1' + topicFromStb2 = 'topic_stb2' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb2, parameterDict['dbName'], 'stb2')) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * (auotCtbNum + parameterDict["ctbNum"]) + topicList = '%s, %s'%(topicFromStb1,topicFromStb2) + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + # add some new child tables using auto creating mode + time.sleep(1) + for index in range(auotCtbNum): + tdSql.query("create table %s.%s_%d using %s.%s tags(%d)"%(parameterDict["dbName"], auotCtbPrefix, index, parameterDict["dbName"], 'stb2', index)) + + self.insert_data(tdSql,parameterDict["dbName"],auotCtbPrefix,auotCtbNum,parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + tdLog.info("insert process end, and start to check consume result") + expectRows = 
1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 2 end ...... ") + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + self.tmqCase1(cfgPath, buildPath) + self.tmqCase2(cfgPath, buildPath) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqDnode.py b/tests/system-test/7-tmq/tmqDnode.py new file mode 100644 index 0000000000000000000000000000000000000000..bb287134b12010a6697e437622ec1ddcff11e7b9 --- /dev/null +++ b/tests/system-test/7-tmq/tmqDnode.py @@ -0,0 +1,481 @@ + +import taos +import sys +import time +import socket +import os +import threading +from enum import Enum + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class actionType(Enum): + CREATE_DATABASE = 0 + CREATE_STABLE = 1 + CREATE_CTABLE = 2 + INSERT_DATA = 3 + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def newcur(self,cfg,host,port): + user = "root" + password = "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def initConsumerTable(self,cdbName='cdb'): + tdLog.info("create consume database, and consume info table, and consume result table") + tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) + + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + def 
initConsumerInfoTable(self,cdbName='cdb'): + tdLog.info("drop consumeinfo table") + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i in range(expectRows): + tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + shellCmd = 'nohup ' + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1): + if dropFlag == 1: + tsql.execute("drop database if exists %s"%(dbName)) + + tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tdLog.debug("complete to create database %s"%(dbName)) + return + + def create_stable(self,tsql, dbName,stbName): + tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName)) + tdLog.debug("complete to create %s.%s" %(dbName, stbName)) + return + + def create_ctables(self,tsql, dbName,stbName,ctbPrefix,ctbNum): + tsql.execute("use %s" %dbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." 
%(stbname, count ,dbname)) + for i in range(ctbNum): + sql += " %s_%d using %s tags(%d)"%(ctbPrefix,i,stbName,i+1) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) + return + + def insert_data_interlaceByMultiTbl(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + ctbDict = {} + for i in range(ctbNum): + ctbDict[i] = 0 + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfCtb = 0 + while rowsOfCtb < rowsPerTbl: + for i in range(ctbNum): + sql += " %s.%s_%d values "%(dbName,ctbPrefix,i) + for k in range(batchNum): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + ctbDict[i], ctbDict[i], ctbDict[i]) + ctbDict[i] += 1 + if (0 == ctbDict[i]%batchNum) or (ctbDict[i] == rowsPerTbl): + tsql.execute(sql) + sql = "insert into " + break + rowsOfCtb = ctbDict[0] + + tdLog.debug("insert data ............ [OK]") + return + + def insert_data(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfSql = 0 + for i in range(ctbNum): + sql += " %s_%d values "%(ctbPrefix,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + rowsOfSql += 1 + if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + rowsOfSql = 0 + if j < rowsPerTbl - 1: + sql = "insert into %s_%d values " %(ctbPrefix,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ [OK]") + return + + def insert_data_with_autoCreateTbl(self,tsql,dbName,stbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data with auto-created child tables ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfSql = 0 + for i in range(ctbNum): + sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'autodata_%d') "%(startTs + j, j, j) + rowsOfSql += 1 + if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + rowsOfSql = 0 + if j < rowsPerTbl - 1: + sql = "insert into %s.%s_%d using %s.%s tags (%d) values " %(dbName,ctbPrefix,i,dbName,stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ 
[OK]") + return + + def prepareEnv(self, **parameterDict): + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + + if parameterDict["actionType"] == actionType.CREATE_DATABASE: + self.create_database(tsql, parameterDict["dbName"]) + elif parameterDict["actionType"] == actionType.CREATE_STABLE: + self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"]) + elif parameterDict["actionType"] == actionType.CREATE_CTABLE: + self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + elif parameterDict["actionType"] == actionType.INSERT_DATA: + self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + else: + tdLog.exit("not support's action: ", parameterDict["actionType"]) + + return + + def tmqCase1(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 1: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db1', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbPrefix': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 23, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbPrefix"], parameterDict["ctbNum"]) + self.insert_data_interlaceByMultiTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 10 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + time.sleep(3) + tdLog.info("================= restart dnode ===========================") + tdDnodes.stop(1) + tdDnodes.start(1) + time.sleep(2) + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 1 end ...... 
") + + def tmqCase2(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 2: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db2', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbPrefix': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 40, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbPrefix"], parameterDict["ctbNum"]) + self.insert_data_interlaceByMultiTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] * 2 + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 50 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("create some new child table and insert data ") + parameterDict['batchNum'] = 100 + self.insert_data_with_autoCreateTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],"ctb",parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + tdLog.info("================= restart dnode ===========================") + tdDnodes.stop(1) + tdDnodes.start(1) + time.sleep(2) + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 2 end ...... 
") + + + + + # 自动建表完成数据插入,启动消费 + def tmqCase3(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 3: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db3', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbPrefix': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 40, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + #self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbPrefix"], parameterDict["ctbNum"]) + #self.insert_data_interlaceByMultiTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + self.insert_data_with_autoCreateTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],"ctb",parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 10 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + # tdLog.info("================= restart dnode ===========================") + # tdDnodes.stop(1) + # tdDnodes.start(1) + # time.sleep(2) + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 3 end ...... 
") + + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + # self.tmqCase1(cfgPath, buildPath) + # self.tmqCase2(cfgPath, buildPath) + self.tmqCase3(cfgPath, buildPath) + # self.tmqCase4(cfgPath, buildPath) + # self.tmqCase5(cfgPath, buildPath) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqModule.py b/tests/system-test/7-tmq/tmqModule.py new file mode 100644 index 0000000000000000000000000000000000000000..ad5b4d70b35ba1ade92bb00c1903ce02340ebb19 --- /dev/null +++ b/tests/system-test/7-tmq/tmqModule.py @@ -0,0 +1,1446 @@ + +import taos +import sys +import time +import socket +import os +import threading +from enum import Enum + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class actionType(Enum): + CREATE_DATABASE = 0 + CREATE_STABLE = 1 + CREATE_CTABLE = 2 + INSERT_DATA = 3 + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def newcur(self,cfg,host,port): + user = "root" + password = "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def initConsumerTable(self,cdbName='cdb'): + tdLog.info("create consume database, and consume info table, and consume result table") + tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) + + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + def initConsumerInfoTable(self,cdbName='cdb'): + tdLog.info("drop consumeinfo table") + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit 
int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i in range(expectRows): + tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + shellCmd = 'nohup ' + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1): + if dropFlag == 1: + tsql.execute("drop database if exists %s"%(dbName)) + + tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tdLog.debug("complete to create database %s"%(dbName)) + return + + def create_stable(self,tsql, dbName,stbName): + tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName)) + tdLog.debug("complete to create %s.%s" %(dbName, stbName)) + return + + def create_ctables(self,tsql, dbName,stbName,ctbNum): + tsql.execute("use %s" %dbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname)) + for i in range(ctbNum): + sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) + return + + def insert_data_interlaceByMultiTbl(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + ctbDict = {} + for i in range(ctbNum): + ctbDict[i] = 0 + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfCtb = 0 + while rowsOfCtb < rowsPerTbl: + for i in range(ctbNum): + sql += " %s.%s_%d values "%(dbName,ctbPrefix,i) + for k in range(batchNum): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + ctbDict[i], ctbDict[i], ctbDict[i]) + ctbDict[i] += 1 + if (0 == ctbDict[i]%batchNum) or (ctbDict[i] == rowsPerTbl): + tsql.execute(sql) + sql = "insert into " + break + rowsOfCtb = ctbDict[0] + + tdLog.debug("insert data ............ 
[OK]") + return + + def insert_data(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfSql = 0 + for i in range(ctbNum): + sql += " %s_%d values "%(ctbPrefix,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + rowsOfSql += 1 + if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + rowsOfSql = 0 + if j < rowsPerTbl - 1: + sql = "insert into %s_%d values " %(ctbPrefix,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ [OK]") + return + + def insert_data_with_autoCreateTbl(self,tsql,dbName,stbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data wiht auto create child table ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfSql = 0 + for i in range(ctbNum): + sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + rowsOfSql += 1 + if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + rowsOfSql = 0 + if j < rowsPerTbl - 1: + sql = "insert into %s.%s_%d using %s.%s tags (%d) values " %(dbName,ctbPrefix,i,dbName,stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ 
[OK]") + return + + def prepareEnv(self, **parameterDict): + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + + if parameterDict["actionType"] == actionType.CREATE_DATABASE: + self.create_database(tsql, parameterDict["dbName"]) + elif parameterDict["actionType"] == actionType.CREATE_STABLE: + self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"]) + elif parameterDict["actionType"] == actionType.CREATE_CTABLE: + self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + elif parameterDict["actionType"] == actionType.INSERT_DATA: + self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + else: + tdLog.exit("not support's action: ", parameterDict["actionType"]) + + return + + def tmqCase1(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 1: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db1', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 33, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data_interlaceByMultiTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 10 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 1 end ...... 
") + + def tmqCase2(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 2: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db2', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + parameterDict2 = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db2', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict2['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_stable(tdSql, parameterDict2["dbName"], parameterDict2["stbName"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start create child tables of stb1 and stb2") + parameterDict['actionType'] = actionType.CREATE_CTABLE + parameterDict2['actionType'] = actionType.CREATE_CTABLE + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("start insert data into child tables of stb1 and stb2") + parameterDict['actionType'] = actionType.INSERT_DATA + parameterDict2['actionType'] = actionType.INSERT_DATA + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 2 end ...... 
") + + def tmqCase3(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 3: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db3', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 50000, \ + 'batchNum': 13, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 5 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + time.sleep(5) + tdLog.info("drop som child table of stb1") + dropTblNum = 4 + tdSql.query("drop table if exists %s.%s_1"%(parameterDict["dbName"], parameterDict["stbName"])) + tdSql.query("drop table if exists %s.%s_2"%(parameterDict["dbName"], parameterDict["stbName"])) + tdSql.query("drop table if exists %s.%s_3"%(parameterDict["dbName"], parameterDict["stbName"])) + tdSql.query("drop table if exists %s.%s_4"%(parameterDict["dbName"], parameterDict["stbName"])) + + tdLog.info("drop some child tables, then start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + remaindrowcnt = parameterDict["rowsPerTbl"] * (parameterDict["ctbNum"] - dropTblNum) + + if not (totalConsumeRows < expectrowcnt and totalConsumeRows > remaindrowcnt): + tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, remaindrowcnt, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 3 end ...... 
") + + def tmqCase4(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 4: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db4', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 5 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt/4: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 1 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 4 end ...... 
") + + def tmqCase5(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 5: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db5', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 5 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt/4: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 1 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != (expectrowcnt * (1 + 1/4)): + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 5 end ...... 
") + + def tmqCase6(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 6: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db6', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 5 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt/4: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:latest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 6 end ...... 
") + + def tmqCase7(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 7: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db7', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:latest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 5 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 1 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 7 end ...... 
") + + def tmqCase8(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 8: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db8', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:latest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume 0 processor") + pollDelay = 10 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume 0 result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 1 processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 2 processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 and 2 result") + expectRows = 3 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt*2: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) + tdLog.exit("tmq consume rows 
error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 8 end ...... ") + + def tmqCase9(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 9: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db9', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:latest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume 0 processor") + pollDelay = 10 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume 0 result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 1 processor") + self.initConsumerInfoTable() + consumerId = 1 + ifManualCommit = 0 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 2 processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 and 2 result") + expectRows = 3 + resultList = 
self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt*2: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 9 end ...... ") + + def tmqCase10(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 10: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db10', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:latest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume 0 processor") + pollDelay = 10 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume 0 result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 1 processor") + self.initConsumerInfoTable() + consumerId = 1 + ifManualCommit = 1 + self.insertConsumerInfo(consumerId, expectrowcnt-10000,topicList,keyList,ifcheckdata,ifManualCommit) + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt-10000: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 2 processor") + self.initConsumerInfoTable() + consumerId = 2 + ifManualCommit = 1 + self.insertConsumerInfo(consumerId, 
expectrowcnt+10000,topicList,keyList,ifcheckdata,ifManualCommit) + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 and 2 result") + expectRows = 3 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt*2: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 10 end ...... ") + + def tmqCase11(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 11: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db11', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 5 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + 
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 11 end ...... ") + + def tmqCase12(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 12: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db12', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 5 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt/4: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt/4: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 12 end ...... 
") + + def tmqCase13(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 13: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db13', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 5 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt/4: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt/2,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt*(1/2+1/4): + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4))) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 2 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 3 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act 
consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 13 end ...... ") + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + self.tmqCase1(cfgPath, buildPath) + # self.tmqCase2(cfgPath, buildPath) + # self.tmqCase3(cfgPath, buildPath) + # self.tmqCase4(cfgPath, buildPath) + # self.tmqCase5(cfgPath, buildPath) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/99-TDcase/TD-16025.py b/tests/system-test/99-TDcase/TD-16025.py new file mode 100644 index 0000000000000000000000000000000000000000..3a70eaf71bf3f6ccfeb3a97444677bab5838ae0e --- /dev/null +++ b/tests/system-test/99-TDcase/TD-16025.py @@ -0,0 +1,481 @@ + +import taos +import sys +import time +import socket +import os +import threading +from enum import Enum + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class actionType(Enum): + CREATE_DATABASE = 0 + CREATE_STABLE = 1 + CREATE_CTABLE = 2 + INSERT_DATA = 3 + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + #tdSql.init(conn.cursor()) + tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def newcur(self,cfg,host,port): + user = "root" + password = "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def initConsumerTable(self,cdbName='cdb'): + tdLog.info("create consume database, and consume info table, and consume result table") + tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) + + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + def initConsumerInfoTable(self,cdbName='cdb'): + tdLog.info("drop consumeinfo table") + 
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i in range(expectRows): + tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + shellCmd = 'nohup ' + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1): + if dropFlag == 1: + tsql.execute("drop database if exists %s"%(dbName)) + + tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tdLog.debug("complete to create database %s"%(dbName)) + return + + def create_stable(self,tsql, dbName,stbName): + tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName)) + tdLog.debug("complete to create %s.%s" %(dbName, stbName)) + return + + def create_ctables(self,tsql, dbName,stbName,ctbPrefix,ctbNum): + tsql.execute("use %s" %dbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." 
%(stbname, count ,dbname)) + for i in range(ctbNum): + sql += " %s_%d using %s tags(%d)"%(ctbPrefix,i,stbName,i+1) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) + return + + def insert_data_interlaceByMultiTbl(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + ctbDict = {} + for i in range(ctbNum): + ctbDict[i] = 0 + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfCtb = 0 + while rowsOfCtb < rowsPerTbl: + for i in range(ctbNum): + sql += " %s.%s_%d values "%(dbName,ctbPrefix,i) + for k in range(batchNum): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + ctbDict[i], ctbDict[i], ctbDict[i]) + ctbDict[i] += 1 + if (0 == ctbDict[i]%batchNum) or (ctbDict[i] == rowsPerTbl): + tsql.execute(sql) + sql = "insert into " + break + rowsOfCtb = ctbDict[0] + + tdLog.debug("insert data ............ [OK]") + return + + def insert_data(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfSql = 0 + for i in range(ctbNum): + sql += " %s_%d values "%(ctbPrefix,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + rowsOfSql += 1 + if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + rowsOfSql = 0 + if j < rowsPerTbl - 1: + sql = "insert into %s_%d values " %(ctbPrefix,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ [OK]") + return + + def insert_data_with_autoCreateTbl(self,tsql,dbName,stbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data wiht auto create child table ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfSql = 0 + for i in range(ctbNum): + sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'autodata_%d') "%(startTs + j, j, j) + rowsOfSql += 1 + if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + rowsOfSql = 0 + if j < rowsPerTbl - 1: + sql = "insert into %s.%s_%d using %s.%s tags (%d) values " %(dbName,ctbPrefix,i,dbName,stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ 
[OK]") + return + + def prepareEnv(self, **parameterDict): + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + + if parameterDict["actionType"] == actionType.CREATE_DATABASE: + self.create_database(tsql, parameterDict["dbName"]) + elif parameterDict["actionType"] == actionType.CREATE_STABLE: + self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"]) + elif parameterDict["actionType"] == actionType.CREATE_CTABLE: + self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + elif parameterDict["actionType"] == actionType.INSERT_DATA: + self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + else: + tdLog.exit("not support's action: ", parameterDict["actionType"]) + + return + + def tmqCase1(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 1: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db1', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbPrefix': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 23, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbPrefix"], parameterDict["ctbNum"]) + self.insert_data_interlaceByMultiTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 10 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + time.sleep(3) + tdLog.info("================= restart dnode ===========================") + tdDnodes.stop(1) + tdDnodes.start(1) + time.sleep(2) + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 1 end ...... 
") + + def tmqCase2(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 2: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db2', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbPrefix': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 40, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbPrefix"], parameterDict["ctbNum"]) + self.insert_data_interlaceByMultiTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] * 2 + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 50 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("create some new child table and insert data ") + parameterDict['batchNum'] = 100 + self.insert_data_with_autoCreateTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],"ctb",parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + tdLog.info("================= restart dnode ===========================") + tdDnodes.stop(1) + tdDnodes.start(1) + time.sleep(2) + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 2 end ...... 
") + + + + + # 自动建表完成数据插入,启动消费 + def tmqCase3(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 3: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db3', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbPrefix': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 40, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + #self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbPrefix"], parameterDict["ctbNum"]) + #self.insert_data_interlaceByMultiTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + self.insert_data_with_autoCreateTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],"ctb",parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 10 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + # tdLog.info("================= restart dnode ===========================") + # tdDnodes.stop(1) + # tdDnodes.start(1) + # time.sleep(2) + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 3 end ...... 
") + + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + # self.tmqCase1(cfgPath, buildPath) + # self.tmqCase2(cfgPath, buildPath) + self.tmqCase3(cfgPath, buildPath) + # self.tmqCase4(cfgPath, buildPath) + # self.tmqCase5(cfgPath, buildPath) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/fulltest.bat b/tests/system-test/fulltest.bat new file mode 100644 index 0000000000000000000000000000000000000000..871c93c9824333acb6ba05474d9249fb9f8d8ed7 --- /dev/null +++ b/tests/system-test/fulltest.bat @@ -0,0 +1,4 @@ + +python3 .\test.py -f 0-others\taosShell.py +python3 .\test.py -f 0-others\taosShellError.py +python3 .\test.py -f 0-others\taosShellNetChk.py \ No newline at end of file diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh old mode 100755 new mode 100644 index 561114a2840e72475e0d1140b5e793acb909c32e..6331086fb3bb842bd255c41b2388136e6a977770 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -8,11 +8,15 @@ python3 ./test.py -f 0-others/taosShellNetChk.py python3 ./test.py -f 0-others/telemetry.py python3 ./test.py -f 0-others/taosdMonitor.py python3 ./test.py -f 0-others/udfTest.py +python3 ./test.py -f 0-others/udf_create.py +python3 ./test.py -f 0-others/udf_restart_taosd.py -# TODO privilege has error -# python3 ./test.py -f 0-others/user_control.py +python3 ./test.py -f 0-others/user_control.py +python3 ./test.py -f 0-others/fsync.py -#python3 ./test.py -f 2-query/between.py +python3 ./test.py -f 1-insert/opentsdb_telnet_line_taosc_insert.py + +python3 ./test.py -f 2-query/between.py python3 ./test.py -f 2-query/distinct.py python3 ./test.py -f 2-query/varchar.py python3 ./test.py -f 2-query/ltrim.py @@ -22,13 +26,18 @@ python3 ./test.py -f 2-query/char_length.py python3 ./test.py -f 2-query/upper.py python3 ./test.py -f 2-query/lower.py python3 ./test.py -f 2-query/join.py +python3 ./test.py -f 2-query/join2.py python3 ./test.py -f 2-query/cast.py +python3 ./test.py -f 2-query/union.py +python3 ./test.py -f 2-query/union1.py python3 ./test.py -f 2-query/concat.py +python3 ./test.py -f 2-query/concat2.py python3 ./test.py -f 2-query/concat_ws.py -# python3 ./test.py -f 2-query/union.py -# python3 ./test.py -f 2-query/union2.py -# python3 ./test.py -f 2-query/union3.py -# python3 ./test.py -f 2-query/union4.py +python3 ./test.py -f 2-query/concat_ws2.py +python3 ./test.py -f 2-query/check_tsdb.py +python3 ./test.py -f 2-query/spread.py +python3 ./test.py -f 2-query/hyperloglog.py + python3 ./test.py -f 2-query/timezone.py python3 ./test.py -f 2-query/Now.py @@ -37,12 +46,15 @@ python3 ./test.py -f 2-query/max.py python3 ./test.py -f 2-query/min.py python3 ./test.py -f 2-query/count.py python3 ./test.py -f 2-query/last.py -#python3 ./test.py -f 2-query/To_iso8601.py +python3 ./test.py -f 2-query/first.py +python3 ./test.py -f 2-query/To_iso8601.py python3 ./test.py -f 2-query/To_unixtimestamp.py python3 ./test.py -f 2-query/timetruncate.py -# python3 ./test.py -f 2-query/diff.py +python3 ./test.py -f 2-query/diff.py python3 ./test.py -f 2-query/Timediff.py -#python3 ./test.py -f 2-query/cast.py + +python3 ./test.py -f 2-query/top.py 
+python3 ./test.py -f 2-query/bottom.py python3 ./test.py -f 2-query/abs.py @@ -59,10 +71,24 @@ python3 ./test.py -f 2-query/arcsin.py python3 ./test.py -f 2-query/arccos.py python3 ./test.py -f 2-query/arctan.py python3 ./test.py -f 2-query/query_cols_tags_and_or.py -python3 ./test.py -f 2-query/nestedQuery.py +# python3 ./test.py -f 2-query/nestedQuery.py +python3 ./test.py -f 2-query/nestedQuery_str.py +python3 ./test.py -f 2-query/avg.py +python3 ./test.py -f 2-query/elapsed.py +python3 ./test.py -f 2-query/csum.py +python3 ./test.py -f 2-query/mavg.py +python3 ./test.py -f 2-query/diff.py +python3 ./test.py -f 2-query/sample.py +python3 ./test.py -f 2-query/function_diff.py python3 ./test.py -f 7-tmq/basic5.py python3 ./test.py -f 7-tmq/subscribeDb.py +python3 ./test.py -f 7-tmq/subscribeDb0.py python3 ./test.py -f 7-tmq/subscribeDb1.py python3 ./test.py -f 7-tmq/subscribeStb.py +python3 ./test.py -f 7-tmq/subscribeStb0.py python3 ./test.py -f 7-tmq/subscribeStb1.py +python3 ./test.py -f 7-tmq/subscribeStb2.py +python3 ./test.py -f 7-tmq/subscribeStb3.py +python3 ./test.py -f 7-tmq/subscribeStb4.py +python3 ./test.py -f 7-tmq/subscribeStb2.py \ No newline at end of file diff --git a/tests/system-test/test-all.bat b/tests/system-test/test-all.bat new file mode 100644 index 0000000000000000000000000000000000000000..ae6c98b06f3504b20e712630d40184b093143835 --- /dev/null +++ b/tests/system-test/test-all.bat @@ -0,0 +1,25 @@ +@echo off +SETLOCAL EnableDelayedExpansion +for /F "tokens=1,2 delims=#" %%a in ('"prompt #$H#$E# & echo on & for %%b in (1) do rem"') do ( set "DEL=%%a") +set /a a=0 +@REM echo Windows Taosd Test +@REM for /F "usebackq tokens=*" %%i in (fulltest.bat) do ( +@REM echo Processing %%i +@REM set /a a+=1 +@REM call %%i ARG1 > result_!a!.txt 2>error_!a!.txt +@REM if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && exit 8 ) else ( call :colorEcho 0a "Success" &echo. ) +@REM ) +echo Linux Taosd Test +for /F "usebackq tokens=*" %%i in (fulltest.bat) do ( + echo Processing %%i + set /a a+=1 + call %%i ARG1 -m %1 > result_!a!.txt 2>error_!a!.txt + if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && exit 8 ) else ( call :colorEcho 0a "Success" &echo. 
) +) +exit + +:colorEcho +echo off +<nul set /p ".=%DEL%" > "%~2" +findstr /v /a:%1 /R "^$" "%~2" nul +del "%~2" > nul 2>&1 \ No newline at end of file diff --git a/tests/system-test/test.py b/tests/system-test/test.py index 31afd027ec3e53713479a402b0eb92fbf2e61db8..6b6487918cd863dfc0d7a07027276c04c94f3617 100644 --- a/tests/system-test/test.py +++ b/tests/system-test/test.py @@ -17,6 +17,9 @@ import sys import getopt import subprocess import time +import base64 +import json +import platform from distutils.log import warn as printf from fabric2 import Connection sys.path.append("../pytest") @@ -38,8 +41,12 @@ if __name__ == "__main__": stop = 0 restart = False windows = 0 - opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrw', [ - 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'windows']) + if platform.system().lower() == 'windows': + windows = 1 + updateCfgDict = {} + execCmd = "" + opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:e:', [ + 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict=', 'execCmd=']) for key, value in opts: if key in ['-h', '--help']: tdLog.printNoPrefix( @@ -52,7 +59,8 @@ tdLog.printNoPrefix('-c Test Cluster Flag') tdLog.printNoPrefix('-g valgrind Test Flag') tdLog.printNoPrefix('-r taosd restart test') - tdLog.printNoPrefix('-w taos on windows') + tdLog.printNoPrefix('-d update cfg dict, base64 json str') + tdLog.printNoPrefix('-e eval str to run') sys.exit(0) if key in ['-r', '--restart']: @@ -85,8 +93,24 @@ if key in ['-s', '--stop']: stop = 1 - if key in ['-w', '--windows']: - windows = 1 + if key in ['-d', '--updateCfgDict']: + try: + updateCfgDict = eval(base64.b64decode(value.encode()).decode()) + except: + print('updateCfgDict convert fail.') + sys.exit(0) + + if key in ['-e', '--execCmd']: + try: + execCmd = base64.b64decode(value.encode()).decode() + except: + print('execCmd convert fail.') + sys.exit(0) + + if not execCmd == "": + tdDnodes.init(deployPath) + exec(execCmd) + quit() if (stop != 0): if (valgrind == 0): @@ -121,23 +145,51 @@ if masterIp == "": host = '127.0.0.1' else: - host = masterIp + try: + config = eval(masterIp) + host = config["host"] + except Exception as r: + host = masterIp tdLog.info("Procedures for tdengine deployed in %s" % (host)) if windows: tdCases.logSql(logSql) tdLog.info("Procedures for testing self-deployment") - td_clinet = TDSimClient("C:\\TDengine") - td_clinet.deploy() - remote_conn = Connection("root@%s"%host) - with remote_conn.cd('/var/lib/jenkins/workspace/TDinternal/community/tests/pytest'): - remote_conn.run("python3 ./test.py") + tdDnodes.init(deployPath, masterIp) + tdDnodes.setTestCluster(testCluster) + tdDnodes.setValgrind(valgrind) + tdDnodes.stopAll() + key_word = 'tdCases.addWindows' + is_test_framework = 0 + try: + if key_word in open(fileName).read(): + is_test_framework = 1 + except: + pass + updateCfgDictStr = '' + if is_test_framework: + moduleName = fileName.replace(".py", "").replace(os.sep, ".") + uModule = importlib.import_module(moduleName) + try: + ucase = uModule.TDTestCase() + if ((json.dumps(updateCfgDict) == '{}') and (ucase.updatecfgDict is not None)): + updateCfgDict = ucase.updatecfgDict + updateCfgDictStr = "-d %s"%base64.b64encode(json.dumps(updateCfgDict).encode()).decode() + except : + pass + else: + pass + tdDnodes.deploy(1,updateCfgDict) + tdDnodes.start(1) conn = taos.connect( host="%s"%(host), + - 
config=td_clinet.cfgDir) - tdCases.runOneWindows(conn, fileName) + config=tdDnodes.sim.getCfgDir()) + if is_test_framework: + tdCases.runOneWindows(conn, fileName) + else: + tdCases.runAllWindows(conn) else: - tdDnodes.init(deployPath) + tdDnodes.init(deployPath, masterIp) tdDnodes.setTestCluster(testCluster) tdDnodes.setValgrind(valgrind) tdDnodes.stopAll() @@ -153,16 +205,13 @@ if __name__ == "__main__": uModule = importlib.import_module(moduleName) try: ucase = uModule.TDTestCase() - tdDnodes.deploy(1,ucase.updatecfgDict) - except : - tdDnodes.deploy(1,{}) - else: - pass - tdDnodes.deploy(1,{}) + if (json.dumps(updateCfgDict) == '{}'): + updateCfgDict = ucase.updatecfgDict + except: + pass + tdDnodes.deploy(1,updateCfgDict) tdDnodes.start(1) - - tdCases.logSql(logSql) if testCluster: diff --git a/tests/test/c/CMakeLists.txt b/tests/test/c/CMakeLists.txt index 964f9fee4abbdc244b83f50390c2b660be6b476c..505c290f2a7b45cdba530fcfbad3e42adefdea90 100644 --- a/tests/test/c/CMakeLists.txt +++ b/tests/test/c/CMakeLists.txt @@ -23,20 +23,18 @@ target_link_libraries( PUBLIC os ) -if(NOT TD_WINDOWS) - add_executable(sdbDump sdbDump.c) - target_link_libraries( - sdbDump - PUBLIC dnode - PUBLIC mnode - PUBLIC sdb - PUBLIC os - ) - target_include_directories( - sdbDump - PUBLIC "${TD_SOURCE_DIR}/include/dnode/mnode" - PRIVATE "${TD_SOURCE_DIR}/source/dnode/mnode/impl/inc" - PRIVATE "${TD_SOURCE_DIR}/source/dnode/mnode/sdb/inc" - PRIVATE "${TD_SOURCE_DIR}/source/dnode/mgmt/node_mgmt/inc" - ) -ENDIF () \ No newline at end of file +add_executable(sdbDump sdbDump.c) +target_link_libraries( + sdbDump + PUBLIC dnode + PUBLIC mnode + PUBLIC sdb + PUBLIC os +) +target_include_directories( + sdbDump + PUBLIC "${TD_SOURCE_DIR}/include/dnode/mnode" + PRIVATE "${TD_SOURCE_DIR}/source/dnode/mnode/impl/inc" + PRIVATE "${TD_SOURCE_DIR}/source/dnode/mnode/sdb/inc" + PRIVATE "${TD_SOURCE_DIR}/source/dnode/mgmt/node_mgmt/inc" +) \ No newline at end of file diff --git a/tests/test/c/sdbDump.c b/tests/test/c/sdbDump.c index 2bc60f777ca99460afc4306d3fa8b13cb25e9003..13152968b9e89b952ae4545ee66e60393630ca3d 100644 --- a/tests/test/c/sdbDump.c +++ b/tests/test/c/sdbDump.c @@ -16,14 +16,17 @@ #define _DEFAULT_SOURCE #include "dmMgmt.h" #include "mndInt.h" -#include "sdbInt.h" +#include "sdb.h" #include "tconfig.h" #include "tjson.h" -#define TMP_SDB_DATA_DIR "/tmp/dumpsdb" -#define TMP_SDB_MNODE_DIR "/tmp/dumpsdb/mnode" -#define TMP_SDB_FILE "/tmp/dumpsdb/mnode/data/sdb.data" -#define TMP_SDB_PATH "/tmp/dumpsdb/mnode/data" +#define TMP_DNODE_DIR TD_TMP_DIR_PATH "dumpsdb" +#define TMP_MNODE_DIR TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" +#define TMP_SDB_DATA_DIR TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "data" +#define TMP_SDB_SYNC_DIR TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "sync" +#define TMP_SDB_DATA_FILE TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "data" TD_DIRSEP "sdb.data" +#define TMP_SDB_RAFT_CFG_FILE TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "sync" TD_DIRSEP "raft_config.json" +#define TMP_SDB_RAFT_STORE_FILE TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "sync" TD_DIRSEP "raft_store.json" void reportStartup(const char *name, const char *desc) {} @@ -107,7 +110,6 @@ void dumpStb(SSdb *pSdb, SJson *json) { tjsonAddStringToObject(item, "updateTime", i642str(pObj->updateTime)); tjsonAddStringToObject(item, "uid", i642str(pObj->uid)); tjsonAddStringToObject(item, "dbUid", i642str(pObj->dbUid)); - tjsonAddIntegerToObject(item, "version", pObj->version); 
tjsonAddIntegerToObject(item, "tagVer", pObj->tagVer); tjsonAddIntegerToObject(item, "colVer", pObj->colVer); tjsonAddIntegerToObject(item, "nextColId", pObj->nextColId); @@ -262,7 +264,7 @@ void dumpCluster(SSdb *pSdb, SJson *json) { } void dumpTrans(SSdb *pSdb, SJson *json) { - void *pIter = NULL; + void *pIter = NULL; SJson *items = tjsonCreateObject(); tjsonAddItemToObject(json, "transactions", items); @@ -294,6 +296,7 @@ void dumpTrans(SSdb *pSdb, SJson *json) { void dumpHeader(SSdb *pSdb, SJson *json) { tjsonAddIntegerToObject(json, "sver", 1); tjsonAddStringToObject(json, "curVer", i642str(pSdb->curVer)); + tjsonAddStringToObject(json, "curTerm", i642str(pSdb->curTerm)); SJson *maxIdsJson = tjsonCreateObject(); tjsonAddItemToObject(json, "maxIds", maxIdsJson); @@ -317,6 +320,10 @@ void dumpHeader(SSdb *pSdb, SJson *json) { } int32_t dumpSdb() { + wDebugFlag = 0; + mDebugFlag = 0; + sDebugFlag = 0; + SMsgCb msgCb = {0}; msgCb.reportStartupFp = reportStartup; msgCb.sendReqFp = sendReq; @@ -324,9 +331,10 @@ int32_t dumpSdb() { msgCb.mgmt = (SMgmtWrapper *)(&msgCb); // hack tmsgSetDefault(&msgCb); walInit(); + syncInit(); SMnodeOpt opt = {.msgCb = msgCb}; - SMnode *pMnode = mndOpen(TMP_SDB_MNODE_DIR, &opt); + SMnode *pMnode = mndOpen(TMP_MNODE_DIR, &opt); if (pMnode == NULL) return -1; SSdb *pSdb = pMnode->pSdb; @@ -368,13 +376,11 @@ int32_t dumpSdb() { taosCloseFile(&pFile); tjsonDelete(json); taosMemoryFree(pCont); - taosRemoveDir(TMP_SDB_DATA_DIR); + taosRemoveDir(TMP_DNODE_DIR); return 0; } int32_t parseArgs(int32_t argc, char *argv[]) { - char file[PATH_MAX] = {0}; - for (int32_t i = 1; i < argc; ++i) { if (strcmp(argv[i], "-c") == 0) { if (i < argc - 1) { @@ -387,20 +393,8 @@ int32_t parseArgs(int32_t argc, char *argv[]) { printf("'-c' requires a parameter, default is %s\n", configDir); return -1; } - } else if (strcmp(argv[i], "-f") == 0) { - if (i < argc - 1) { - if (strlen(argv[++i]) >= PATH_MAX) { - printf("file path overflow"); - return -1; - } - tstrncpy(file, argv[i], PATH_MAX); - } else { - printf("'-f' requires a parameter, default is %s\n", configDir); - return -1; - } } else { printf("-c Configuration directory. \n"); - printf("-f Input sdb.data file. 
\n"); return -1; } } @@ -415,13 +409,39 @@ int32_t parseArgs(int32_t argc, char *argv[]) { return -1; } - if (file[0] == 0) { - snprintf(file, PATH_MAX, "%s/mnode/data/sdb.data", tsDataDir); - } - - strcpy(tsDataDir, TMP_SDB_DATA_DIR); - taosMulMkDir(TMP_SDB_PATH); - taosCopyFile(file, TMP_SDB_FILE); + char dataFile[PATH_MAX] = {0}; + char raftCfgFile[PATH_MAX] = {0}; + char raftStoreFile[PATH_MAX] = {0}; + snprintf(dataFile, PATH_MAX, "%s" TD_DIRSEP "mnode" TD_DIRSEP "data" TD_DIRSEP "sdb.data", tsDataDir); + snprintf(raftCfgFile, PATH_MAX, "%s" TD_DIRSEP "mnode" TD_DIRSEP "sync" TD_DIRSEP "raft_config.json", tsDataDir); + snprintf(raftStoreFile, PATH_MAX, "%s" TD_DIRSEP "mnode" TD_DIRSEP "sync" TD_DIRSEP "raft_store.json", tsDataDir); + + char cmd[PATH_MAX * 2] = {0}; + snprintf(cmd, sizeof(cmd), "rm -rf %s", TMP_DNODE_DIR); + system(cmd); +#ifdef WINDOWS + taosMulMkDir(TMP_SDB_DATA_DIR); + taosMulMkDir(TMP_SDB_SYNC_DIR); + snprintf(cmd, sizeof(cmd), "cp %s %s 2>nul", dataFile, TMP_SDB_DATA_FILE); + system(cmd); + snprintf(cmd, sizeof(cmd), "cp %s %s 2>nul", raftCfgFile, TMP_SDB_RAFT_CFG_FILE); + system(cmd); + snprintf(cmd, sizeof(cmd), "cp %s %s 2>nul", raftStoreFile, TMP_SDB_RAFT_STORE_FILE); + system(cmd); +#else + snprintf(cmd, sizeof(cmd), "mkdir -p %s", TMP_SDB_DATA_DIR); + system(cmd); + snprintf(cmd, sizeof(cmd), "mkdir -p %s", TMP_SDB_SYNC_DIR); + system(cmd); + snprintf(cmd, sizeof(cmd), "cp %s %s 2>/dev/null", dataFile, TMP_SDB_DATA_FILE); + system(cmd); + snprintf(cmd, sizeof(cmd), "cp %s %s 2>/dev/null", raftCfgFile, TMP_SDB_RAFT_CFG_FILE); + system(cmd); + snprintf(cmd, sizeof(cmd), "cp %s %s 2>/dev/null", raftStoreFile, TMP_SDB_RAFT_STORE_FILE); + system(cmd); +#endif + + strcpy(tsDataDir, TMP_DNODE_DIR); return 0; } diff --git a/tests/test/c/tmqSim.c b/tests/test/c/tmqSim.c index e0f58d052f612fca7ad0a257d8e137d8d4a5a1f6..accd1dd080ec21a33cde9d803a4c4e361cb96b16 100644 --- a/tests/test/c/tmqSim.c +++ b/tests/test/c/tmqSim.c @@ -321,9 +321,16 @@ int32_t saveConsumeResult(SThreadInfo* pInfo) { TAOS* pConn = taos_connect(NULL, "root", "taosdata", NULL, 0); assert(pConn != NULL); + int64_t now = taosGetTimestampMs(); + // schema: ts timestamp, consumerid int, consummsgcnt bigint, checkresult int - sprintf(sqlStr, "insert into %s.consumeresult values (now, %d, %" PRId64 ", %" PRId64 ", %d)", g_stConfInfo.cdbName, - pInfo->consumerId, pInfo->consumeMsgCnt, pInfo->consumeRowCnt, pInfo->checkresult); + sprintf(sqlStr, "insert into %s.consumeresult values (%"PRId64", %d, %" PRId64 ", %" PRId64 ", %d)", + g_stConfInfo.cdbName, + now, + pInfo->consumerId, + pInfo->consumeMsgCnt, + pInfo->consumeRowCnt, + pInfo->checkresult); char tmpString[128]; taosFprintfFile(g_fp, "%s, consume id %d result: %s\n", getCurrentTimeString(tmpString), pInfo->consumerId ,sqlStr); diff --git a/tools/shell/CMakeLists.txt b/tools/shell/CMakeLists.txt index 284693795ee471ad2d631758970c3033dc8e0c6c..295fae68b30e85e787a7b8b491a354bcc3125709 100644 --- a/tools/shell/CMakeLists.txt +++ b/tools/shell/CMakeLists.txt @@ -1,9 +1,13 @@ aux_source_directory(src SHELL_SRC) add_executable(shell ${SHELL_SRC}) +if(TD_WINDOWS) + target_link_libraries(shell PUBLIC taos_static) +else() + target_link_libraries(shell PUBLIC taos) +endif () target_link_libraries( shell - PUBLIC taos PRIVATE os common transport util ) target_include_directories( diff --git a/tools/shell/src/shellArguments.c b/tools/shell/src/shellArguments.c index 1639fd1ca681ab0bb980b2fd1fca8b34d58e15f3..cd6613b17a2d6d62ef4ce969b27004f1b5e7df72 100644 --- 
a/tools/shell/src/shellArguments.c +++ b/tools/shell/src/shellArguments.c @@ -36,6 +36,8 @@ #define SHELL_VERSION "Print program version." #define SHELL_EMAIL "" +static int32_t shellParseSingleOpt(int32_t key, char *arg); + void shellPrintHelp() { char indent[] = " "; printf("Usage: taos [OPTION...] \n\n"); @@ -90,6 +92,21 @@ static struct argp_option shellOptions[] = { {0}, }; +static error_t shellParseOpt(int32_t key, char *arg, struct argp_state *state) { return shellParseSingleOpt(key, arg); } + +static struct argp shellArgp = {shellOptions, shellParseOpt, "", ""}; + +static void shellParseArgsUseArgp(int argc, char *argv[]) { + argp_program_version = shell.info.programVersion; + argp_parse(&shellArgp, argc, argv, 0, 0, &shell.args); +} + +#endif + +#ifndef ARGP_ERR_UNKNOWN + #define ARGP_ERR_UNKNOWN E2BIG +#endif + static int32_t shellParseSingleOpt(int32_t key, char *arg) { SShellArgs *pArgs = &shell.args; @@ -196,8 +213,8 @@ int32_t shellParseArgsWithoutArgp(int argc, char *argv[]) { } shellParseSingleOpt(key[1], val); i++; - } else if (key[1] == 'p' || key[1] == 'A' || key[1] == 'c' || key[1] == 'r' || key[1] == 'k' || key[1] == 't' || - key[1] == 'V') { + } else if (key[1] == 'p' || key[1] == 'A' || key[1] == 'C' || key[1] == 'r' || key[1] == 'k' || + key[1] == 't' || key[1] == 'V' || key[1] == '?' || key[1] == 1) { shellParseSingleOpt(key[1], NULL); } else { fprintf(stderr, "invalid option %s\n", key); @@ -208,21 +225,10 @@ int32_t shellParseArgsWithoutArgp(int argc, char *argv[]) { return 0; } -static error_t shellParseOpt(int32_t key, char *arg, struct argp_state *state) { return shellParseSingleOpt(key, arg); } - -static struct argp shellArgp = {shellOptions, shellParseOpt, "", ""}; - -static void shellParseArgsUseArgp(int argc, char *argv[]) { - argp_program_version = shell.info.programVersion; - argp_parse(&shellArgp, argc, argv, 0, 0, &shell.args); -} - -#endif - static void shellInitArgs(int argc, char *argv[]) { for (int i = 1; i < argc; i++) { if (strncmp(argv[i], "-p", 2) == 0) { - printf(shell.info.clientVersion, tsOsName, taos_get_client_info()); + // printf(shell.info.clientVersion, tsOsName, taos_get_client_info()); if (strlen(argv[i]) == 2) { printf("Enter password: "); taosSetConsoleEcho(false); @@ -341,7 +347,7 @@ int32_t shellParseArgs(int32_t argc, char *argv[]) { #if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32) shell.info.osname = "Windows"; snprintf(shell.history.file, TSDB_FILENAME_LEN, "C:/TDengine/%s", SHELL_HISTORY_FILE); - // if (shellParseArgsWithoutArgp(argc, argv) != 0) return -1; + if (shellParseArgsWithoutArgp(argc, argv) != 0) return -1; #elif defined(_TD_DARWIN_64) shell.info.osname = "Darwin"; snprintf(shell.history.file, TSDB_FILENAME_LEN, "%s/%s", getpwuid(getuid())->pw_dir, SHELL_HISTORY_FILE); diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index a866488d3ad2d239c47b3279f506e755737b88bf..851d9a2070b75f7863f8e55f5779e9bac90607db 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -587,6 +587,8 @@ int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision) { int32_t width = (int32_t)strlen(field->name); switch (field->type) { + case TSDB_DATA_TYPE_NULL: + return TMAX(4, width); // null case TSDB_DATA_TYPE_BOOL: return TMAX(5, width); // 'false' diff --git a/tools/taos-tools b/tools/taos-tools index 0aad27d725f4ee6b18daf1db0c07d933aed16eea..4d83d8c62973506f760bcaa3a33f4665ed9046d0 160000 --- a/tools/taos-tools +++ b/tools/taos-tools @@ -1 +1 @@ -Subproject commit 
0aad27d725f4ee6b18daf1db0c07d933aed16eea +Subproject commit 4d83d8c62973506f760bcaa3a33f4665ed9046d0
-10.3 219 0.31 Beijing.Chaoyang 2
+10.3 219 0.31 California.SanFrancisco 2
-10.2 220 0.23 Beijing.Chaoyang 3
+10.2 220 0.23 California.SanFrancisco 3
-11.5 221 0.35 Beijing.Haidian 3
+11.5 221 0.35 California.LosAngeles 3
-13.4 223 0.29 Beijing.Haidian 2
+13.4 223 0.29 California.LosAngeles 2
-12.6 218 0.33 Beijing.Chaoyang 2
+12.6 218 0.33 California.SanFrancisco 2
-11.8 221 0.28 Beijing.Haidian 2
+11.8 221 0.28 California.LosAngeles 2
-10.3 218 0.25 Beijing.Chaoyang 3
+10.3 218 0.25 California.SanFrancisco 3
-12.3 221 0.31 Beijing.Chaoyang 2
+12.3 221 0.31 California.SanFrancisco 2
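
The renamed rows above follow TDengine's stock meters sample layout: current, voltage, phase, then the location and groupid tag values. A hedged sketch of inserting one such row, assuming the standard demo schema; the test database and the d1001 child table name are illustrative:

    # Assumed demo schema:
    #   create table test.meters (ts timestamp, current float, voltage int, phase float)
    #     tags (location binary(64), groupid int)
    row = (10.3, 219, 0.31, "California.SanFrancisco", 2)
    sql = ("insert into test.d1001 using test.meters tags ('%s', %d) "
           "values (now, %f, %d, %f)" % (row[3], row[4], row[0], row[1], row[2]))
    print(sql)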
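
The tmq test cases earlier in the diff hand the consumer its settings as one comma-separated key:value string (keyList). A minimal sketch of unpacking such a string, assuming the consumer splits on ',' and on the first ':' only; parse_key_list is an illustrative helper, not part of the test framework:

    # Hypothetical helper: turn 'group.id:cgrp1, enable.auto.commit:false, ...'
    # into a dict. Assumes keys contain no ':' and entries contain no ','.
    def parse_key_list(key_list: str) -> dict:
        conf = {}
        for entry in key_list.split(','):
            key, _, value = entry.strip().partition(':')
            if key:
                conf[key] = value
        return conf

    print(parse_key_list('group.id:cgrp1, enable.auto.commit:false'))
    # {'group.id': 'cgrp1', 'enable.auto.commit': 'false'}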
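
tmqCase2 expects expectrowcnt = rowsPerTbl * ctbNum * 2 because the same volume is written twice: once into the pre-created child tables via insert_data_interlaceByMultiTbl, and once more through insert_data_with_autoCreateTbl while the consumer is already running. Spelled out:

    rowsPerTbl, ctbNum = 10000, 10
    first_pass = rowsPerTbl * ctbNum    # interlaced insert into pre-created ctables
    second_pass = rowsPerTbl * ctbNum   # insert via auto-created "ctb" tables
    expectrowcnt = first_pass + second_pass  # 200000 == rowsPerTbl * ctbNum * 2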
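
test.py's new -d and -e options carry base64-encoded payloads: -d is decoded with eval(base64.b64decode(value.encode()).decode()) into a config dict, -e into a command string for exec. A minimal sketch of producing a matching -d value, mirroring the updateCfgDictStr construction in the Windows branch; the debugFlag override and the case file name are only examples:

    import base64
    import json

    update_cfg = {"debugFlag": 143}  # illustrative config override

    # Encode the way test.py decodes: dict -> JSON -> bytes -> base64 -> str.
    encoded = base64.b64encode(json.dumps(update_cfg).encode()).decode()
    print("python3 ./test.py -f mycase.py -d %s" % encoded)

    # Round-trip check matching the -d handler.
    assert eval(base64.b64decode(encoded.encode()).decode()) == update_cfg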
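
sdbDump no longer copies sdb.data alone: it stages sdb.data plus raft_config.json and raft_store.json into a throwaway dnode directory, points tsDataDir at it, and only then calls mndOpen. A rough Python mirror of the staging step, shown for the directory layout only; the tempfile-based root and the source data directory are assumptions, not what the C code literally uses:

    import os
    import shutil
    import tempfile

    data_dir = "/var/lib/taos"  # stand-in for tsDataDir
    tmp_dnode = os.path.join(tempfile.gettempdir(), "dumpsdb")

    for rel in ("mnode/data/sdb.data",
                "mnode/sync/raft_config.json",
                "mnode/sync/raft_store.json"):
        src = os.path.join(data_dir, rel)
        dst = os.path.join(tmp_dnode, rel)
        os.makedirs(os.path.dirname(dst), exist_ok=True)
        if os.path.exists(src):  # tolerate absence, like the cp ... 2>/dev/null calls
            shutil.copyfile(src, dst)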